2024-12-11 20:46:22,998 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-11 20:46:23,028 main DEBUG Took 0.025119 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-11 20:46:23,029 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-11 20:46:23,030 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-11 20:46:23,031 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-11 20:46:23,033 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,054 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-11 20:46:23,124 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,131 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,133 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,133 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,134 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,134 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,144 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,145 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,146 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,146 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,147 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,148 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,148 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,149 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,149 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,150 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,150 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,151 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,151 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,152 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,152 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,153 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,153 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,154 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-11 20:46:23,154 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,154 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-11 20:46:23,166 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-11 20:46:23,168 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-11 20:46:23,177 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-11 20:46:23,187 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-11 20:46:23,200 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-11 20:46:23,201 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-11 20:46:23,224 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-11 20:46:23,234 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-11 20:46:23,244 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-11 20:46:23,245 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-11 20:46:23,246 main DEBUG createAppenders(={Console})
2024-12-11 20:46:23,250 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized
2024-12-11 20:46:23,252 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-11 20:46:23,252 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK.
2024-12-11 20:46:23,253 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-11 20:46:23,259 main DEBUG OutputStream closed
2024-12-11 20:46:23,261 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-11 20:46:23,262 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-11 20:46:23,262 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK
2024-12-11 20:46:23,553 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-11 20:46:23,556 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-11 20:46:23,558 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-11 20:46:23,559 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-11 20:46:23,560 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-11 20:46:23,561 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-11 20:46:23,561 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-11 20:46:23,562 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-11 20:46:23,562 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-11 20:46:23,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-11 20:46:23,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-11 20:46:23,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-11 20:46:23,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-11 20:46:23,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-11 20:46:23,565 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-11 20:46:23,565 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-11 20:46:23,566 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-11 20:46:23,567 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-11 20:46:23,572 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-11 20:46:23,572 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null
2024-12-11 20:46:23,574 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-11 20:46:23,592 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK.
2024-12-11T20:46:23,640 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-11 20:46:23,644 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-11 20:46:23,645 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-11T20:46:24,328 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06
2024-12-11T20:46:24,329 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-12-11T20:46:24,413 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-11T20:46:24,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-11T20:46:24,758 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab, deleteOnExit=true
2024-12-11T20:46:24,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-11T20:46:24,760 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/test.cache.data in system properties and HBase conf
2024-12-11T20:46:24,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.tmp.dir in system properties and HBase conf
2024-12-11T20:46:24,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir in system properties and HBase conf
2024-12-11T20:46:24,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-11T20:46:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-11T20:46:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-11T20:46:24,966 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-11T20:46:24,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-11T20:46:24,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-11T20:46:24,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-11T20:46:24,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-11T20:46:24,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-11T20:46:24,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-11T20:46:24,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-11T20:46:24,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-11T20:46:24,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-11T20:46:24,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/nfs.dump.dir in system properties and HBase conf
2024-12-11T20:46:24,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir in system properties and HBase conf
2024-12-11T20:46:24,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-11T20:46:25,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-11T20:46:25,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-11T20:46:27,199 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-11T20:46:27,440 INFO [Time-limited test {}] log.Log(170): Logging initialized @6424ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-11T20:46:27,601 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T20:46:27,738 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T20:46:27,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T20:46:27,877 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T20:46:27,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T20:46:27,907 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T20:46:27,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a1d5810{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE}
2024-12-11T20:46:27,920 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62b16b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T20:46:28,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71d736e3{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-39731-hadoop-hdfs-3_4_1-tests_jar-_-any-5363561656140303053/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-11T20:46:28,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}
2024-12-11T20:46:28,364 INFO [Time-limited test {}] server.Server(415): Started @7349ms
2024-12-11T20:46:29,146 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T20:46:29,162 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T20:46:29,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T20:46:29,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T20:46:29,168 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T20:46:29,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4107ad3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE}
2024-12-11T20:46:29,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ae01b05{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T20:46:29,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@323a2aa2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-33193-hadoop-hdfs-3_4_1-tests_jar-_-any-16845685847260095501/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T20:46:29,354 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}
2024-12-11T20:46:29,356 INFO [Time-limited test {}] server.Server(415): Started @8341ms
2024-12-11T20:46:29,489 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T20:46:29,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T20:46:29,820 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T20:46:29,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T20:46:29,846 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T20:46:29,846 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T20:46:29,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3188e696{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE}
2024-12-11T20:46:29,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71ade74d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T20:46:30,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@486061d4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-40177-hadoop-hdfs-3_4_1-tests_jar-_-any-2640872011418339891/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T20:46:30,007 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}
2024-12-11T20:46:30,007 INFO [Time-limited test {}] server.Server(415): Started @8992ms
2024-12-11T20:46:30,010 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T20:46:30,124 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-11T20:46:30,146 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-11T20:46:30,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-11T20:46:30,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-11T20:46:30,154 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-11T20:46:30,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@80b4649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE}
2024-12-11T20:46:30,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef7cd2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-11T20:46:30,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b930908{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-33915-hadoop-hdfs-3_4_1-tests_jar-_-any-4891165918622885397/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T20:46:30,386 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:33915}
2024-12-11T20:46:30,391 INFO [Time-limited test {}] server.Server(415): Started @9372ms
2024-12-11T20:46:30,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-11T20:46:31,410 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,411 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,429 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,433 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,576 WARN [Thread-136 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,580 WARN [Thread-137 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605/current, will proceed with Du for space computation calculation,
2024-12-11T20:46:31,590 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T20:46:31,613 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T20:46:31,660 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-11T20:46:31,683 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa15ea1f18a797a26 with lease ID 0x6fd6a20e2ad20c4d: Processing first storage report for DS-58496555-acc8-4e14-a2aa-6653727f2410 from datanode DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa15ea1f18a797a26 with lease ID 0x6fd6a20e2ad20c4d: from storage DS-58496555-acc8-4e14-a2aa-6653727f2410 node DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,685 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4a3edc82951f039 with lease ID 0x6fd6a20e2ad20c4b: Processing first storage report for DS-f27da252-d217-4063-b080-6d805699a8f4 from datanode DatanodeRegistration(127.0.0.1:46705, datanodeUuid=96d3df54-63f4-4061-b277-1b7a65df4f7b, infoPort=33225, infoSecurePort=0, ipcPort=46313, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,685 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4a3edc82951f039 with lease ID 0x6fd6a20e2ad20c4b: from storage DS-f27da252-d217-4063-b080-6d805699a8f4 node DatanodeRegistration(127.0.0.1:46705, datanodeUuid=96d3df54-63f4-4061-b277-1b7a65df4f7b, infoPort=33225, infoSecurePort=0, ipcPort=46313, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e2cb87ad9025682 with lease ID 0x6fd6a20e2ad20c4c: Processing first storage report for DS-84276d66-e551-476e-a085-4f7c53a4846b from datanode DatanodeRegistration(127.0.0.1:45301, datanodeUuid=2899ec59-41d8-4955-84e0-35d5f5e61d55, infoPort=37781, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,686 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e2cb87ad9025682 with lease ID 0x6fd6a20e2ad20c4c: from storage DS-84276d66-e551-476e-a085-4f7c53a4846b node DatanodeRegistration(127.0.0.1:45301, datanodeUuid=2899ec59-41d8-4955-84e0-35d5f5e61d55, infoPort=37781, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,686 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa15ea1f18a797a26 with lease ID 0x6fd6a20e2ad20c4d: Processing first storage report for DS-ef1e522f-f697-42de-a997-46c88fcecc73 from datanode DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa15ea1f18a797a26 with lease ID 0x6fd6a20e2ad20c4d: from storage DS-ef1e522f-f697-42de-a997-46c88fcecc73 node DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa4a3edc82951f039 with lease ID 0x6fd6a20e2ad20c4b: Processing first storage report for DS-8ca43692-b5d4-492d-b735-4680296fa1e6 from datanode DatanodeRegistration(127.0.0.1:46705, datanodeUuid=96d3df54-63f4-4061-b277-1b7a65df4f7b, infoPort=33225, infoSecurePort=0, ipcPort=46313, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,687 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa4a3edc82951f039 with lease ID 0x6fd6a20e2ad20c4b: from storage DS-8ca43692-b5d4-492d-b735-4680296fa1e6 node DatanodeRegistration(127.0.0.1:46705, datanodeUuid=96d3df54-63f4-4061-b277-1b7a65df4f7b, infoPort=33225, infoSecurePort=0, ipcPort=46313, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,687 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8e2cb87ad9025682 with lease ID 0x6fd6a20e2ad20c4c: Processing first storage report for DS-db69bd3c-34cc-4421-9d98-efa129da21a2 from datanode DatanodeRegistration(127.0.0.1:45301, datanodeUuid=2899ec59-41d8-4955-84e0-35d5f5e61d55, infoPort=37781, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605)
2024-12-11T20:46:31,688 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e2cb87ad9025682 with lease ID 0x6fd6a20e2ad20c4c: from storage DS-db69bd3c-34cc-4421-9d98-efa129da21a2 node DatanodeRegistration(127.0.0.1:45301, datanodeUuid=2899ec59-41d8-4955-84e0-35d5f5e61d55, infoPort=37781, infoSecurePort=0, ipcPort=36451, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-11T20:46:31,775 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06
2024-12-11T20:46:32,001 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/zookeeper_0, clientPort=57311, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-11T20:46:32,055 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57311
2024-12-11T20:46:32,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:32,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:32,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741825_1001 (size=7)
2024-12-11T20:46:32,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741825_1001 (size=7)
2024-12-11T20:46:32,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741825_1001 (size=7)
2024-12-11T20:46:32,867 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 with version=8
2024-12-11T20:46:32,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/hbase-staging
2024-12-11T20:46:33,027 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-11T20:46:33,428 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5be53b084ac7:0 server-side Connection retries=45
2024-12-11T20:46:33,449 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:33,450 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:33,495 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T20:46:33,496 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:33,497 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T20:46:33,840 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-11T20:46:33,979 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-11T20:46:33,992 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-11T20:46:33,996 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T20:46:34,035 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 104029 (auto-detected)
2024-12-11T20:46:34,037 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-11T20:46:34,069 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41779
2024-12-11T20:46:34,107 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41779 connecting to ZooKeeper ensemble=127.0.0.1:57311
2024-12-11T20:46:34,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417790x0, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T20:46:34,255 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41779-0x10016aeea9b0000 connected
2024-12-11T20:46:34,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:34,586 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:34,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T20:46:34,618 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5, hbase.cluster.distributed=false
2024-12-11T20:46:34,699 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T20:46:34,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41779
2024-12-11T20:46:34,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41779
2024-12-11T20:46:34,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41779
2024-12-11T20:46:34,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41779
2024-12-11T20:46:34,719 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41779
2024-12-11T20:46:34,893 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45
2024-12-11T20:46:34,896 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:34,897 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:34,898 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T20:46:34,899 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:34,899 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T20:46:34,904 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-11T20:46:34,909 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T20:46:34,911 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40355
2024-12-11T20:46:34,914 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40355 connecting to ZooKeeper ensemble=127.0.0.1:57311
2024-12-11T20:46:34,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:34,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:34,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403550x0, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T20:46:34,984 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403550x0, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T20:46:34,990 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40355-0x10016aeea9b0001 connected
2024-12-11T20:46:34,993 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-11T20:46:35,014 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-11T20:46:35,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-11T20:46:35,039 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T20:46:35,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40355
2024-12-11T20:46:35,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40355
2024-12-11T20:46:35,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40355
2024-12-11T20:46:35,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40355
2024-12-11T20:46:35,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40355
2024-12-11T20:46:35,112 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45
2024-12-11T20:46:35,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,120 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T20:46:35,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T20:46:35,121 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-11T20:46:35,121 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T20:46:35,128 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39865
2024-12-11T20:46:35,131 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39865 connecting to ZooKeeper ensemble=127.0.0.1:57311
2024-12-11T20:46:35,144 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:35,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:35,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398650x0, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-11T20:46:35,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398650x0, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-11T20:46:35,180 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-11T20:46:35,187 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39865-0x10016aeea9b0002 connected
2024-12-11T20:46:35,188 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-11T20:46:35,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-11T20:46:35,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-11T20:46:35,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39865
2024-12-11T20:46:35,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39865
2024-12-11T20:46:35,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39865
2024-12-11T20:46:35,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39865
2024-12-11T20:46:35,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39865
2024-12-11T20:46:35,247 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5be53b084ac7:0 server-side Connection retries=45
2024-12-11T20:46:35,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,250 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-11T20:46:35,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-11T20:46:35,250 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-11T20:46:35,255 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-11T20:46:35,256 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-11T20:46:35,263 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40781
2024-12-11T20:46:35,266 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40781 connecting to ZooKeeper ensemble=127.0.0.1:57311
2024-12-11T20:46:35,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:35,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-11T20:46:35,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407810x0, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T20:46:35,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:407810x0, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:46:35,315 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40781-0x10016aeea9b0003 connected 2024-12-11T20:46:35,316 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T20:46:35,327 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T20:46:35,332 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T20:46:35,338 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T20:46:35,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40781 2024-12-11T20:46:35,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40781 2024-12-11T20:46:35,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40781 2024-12-11T20:46:35,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40781 2024-12-11T20:46:35,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40781 2024-12-11T20:46:35,407 DEBUG [M:0;5be53b084ac7:41779 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5be53b084ac7:41779 2024-12-11T20:46:35,424 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/5be53b084ac7,41779,1733949993169 2024-12-11T20:46:35,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,492 DEBUG 
[master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5be53b084ac7,41779,1733949993169 2024-12-11T20:46:35,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:46:35,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:46:35,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:35,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:35,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:35,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T20:46:35,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:35,548 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T20:46:35,549 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5be53b084ac7,41779,1733949993169 from backup master directory 2024-12-11T20:46:35,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5be53b084ac7,41779,1733949993169 2024-12-11T20:46:35,589 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T20:46:35,589 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:46:35,589 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5be53b084ac7,41779,1733949993169 2024-12-11T20:46:35,602 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T20:46:35,607 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T20:46:35,810 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/hbase.id] with ID: 70de47e7-5307-4e5f-941e-8ba89cc45c97 2024-12-11T20:46:35,810 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.tmp/hbase.id 2024-12-11T20:46:35,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:46:35,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:46:35,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741826_1002 (size=42) 2024-12-11T20:46:35,916 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.tmp/hbase.id]:[hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/hbase.id] 2024-12-11T20:46:36,038 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:46:36,045 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T20:46:36,094 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 43ms. 
2024-12-11T20:46:36,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:36,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:36,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:36,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:36,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:46:36,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:46:36,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741827_1003 (size=196) 2024-12-11T20:46:36,639 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:46:36,643 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T20:46:36,664 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T20:46:36,673 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:46:36,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741828_1004 (size=1189) 2024-12-11T20:46:36,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741828_1004 (size=1189) 2024-12-11T20:46:36,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741828_1004 (size=1189) 2024-12-11T20:46:37,193 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/data/master/store 2024-12-11T20:46:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:46:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:46:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741829_1005 (size=34) 2024-12-11T20:46:37,679 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-11T20:46:37,684 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:37,686 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T20:46:37,686 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:46:37,686 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:46:37,688 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T20:46:37,689 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:46:37,689 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:46:37,690 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733949997685Disabling compacts and flushes for region at 1733949997685Disabling writes for close at 1733949997688 (+3 ms)Writing region close event to WAL at 1733949997689 (+1 ms)Closed at 1733949997689 2024-12-11T20:46:37,694 WARN [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/data/master/store/.initializing 2024-12-11T20:46:37,694 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/WALs/5be53b084ac7,41779,1733949993169 2024-12-11T20:46:37,713 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:46:37,766 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C41779%2C1733949993169, suffix=, logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/WALs/5be53b084ac7,41779,1733949993169, archiveDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/oldWALs, maxLogs=10 2024-12-11T20:46:37,811 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/WALs/5be53b084ac7,41779,1733949993169/5be53b084ac7%2C41779%2C1733949993169.1733949997773, exclude list is [], retry=0 2024-12-11T20:46:37,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK] 2024-12-11T20:46:37,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46705,DS-f27da252-d217-4063-b080-6d805699a8f4,DISK] 2024-12-11T20:46:37,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK] 2024-12-11T20:46:37,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T20:46:37,933 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/WALs/5be53b084ac7,41779,1733949993169/5be53b084ac7%2C41779%2C1733949993169.1733949997773 2024-12-11T20:46:37,935 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33225:33225),(127.0.0.1/127.0.0.1:37781:37781),(127.0.0.1/127.0.0.1:46505:46505)] 2024-12-11T20:46:37,936 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:46:37,937 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:37,950 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:37,952 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,035 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T20:46:38,079 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:38,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:38,084 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T20:46:38,093 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:38,095 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:38,095 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T20:46:38,106 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:38,111 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:38,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T20:46:38,120 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:38,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:38,122 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,130 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,132 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,140 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,141 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,147 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T20:46:38,155 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T20:46:38,167 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:38,169 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62837909, jitterRate=-0.06364218890666962}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:46:38,177 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733949997991Initializing all the Stores at 1733949998000 (+9 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733949998001 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733949998003 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733949998003Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733949998003Cleaning up temporary data from old regions at 1733949998141 (+138 ms)Region opened successfully at 1733949998177 (+36 ms) 2024-12-11T20:46:38,180 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T20:46:38,240 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@327ccadf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:46:38,302 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-11T20:46:38,324 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T20:46:38,325 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T20:46:38,329 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T20:46:38,335 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T20:46:38,344 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-12-11T20:46:38,344 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T20:46:38,400 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T20:46:38,417 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T20:46:38,435 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T20:46:38,442 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T20:46:38,449 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T20:46:38,460 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T20:46:38,465 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T20:46:38,473 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T20:46:38,486 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T20:46:38,491 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T20:46:38,504 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T20:46:38,539 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T20:46:38,549 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:46:38,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T20:46:38,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,578 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5be53b084ac7,41779,1733949993169, sessionid=0x10016aeea9b0000, setting cluster-up flag (Was=false) 2024-12-11T20:46:38,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-11T20:46:38,667 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T20:46:38,671 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,41779,1733949993169 2024-12-11T20:46:38,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:38,735 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T20:46:38,739 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5be53b084ac7,41779,1733949993169 2024-12-11T20:46:38,755 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T20:46:38,791 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(746): ClusterId : 70de47e7-5307-4e5f-941e-8ba89cc45c97 2024-12-11T20:46:38,795 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(746): ClusterId : 70de47e7-5307-4e5f-941e-8ba89cc45c97 2024-12-11T20:46:38,796 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:46:38,797 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:46:38,800 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(746): ClusterId : 70de47e7-5307-4e5f-941e-8ba89cc45c97 2024-12-11T20:46:38,800 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T20:46:38,805 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-11T20:46:38,812 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:46:38,813 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-11T20:46:38,863 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:46:38,863 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:46:38,886 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T20:46:38,899 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T20:46:38,906 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:46:38,906 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T20:46:38,906 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:46:38,906 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T20:46:38,910 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-11T20:46:38,919 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5be53b084ac7,41779,1733949993169 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T20:46:39,008 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:46:39,009 DEBUG [RS:2;5be53b084ac7:40781 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b0d6ad5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:46:39,011 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:46:39,012 DEBUG [RS:0;5be53b084ac7:40355 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7969ec49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:46:39,013 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T20:46:39,013 DEBUG [RS:1;5be53b084ac7:39865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19097e69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5be53b084ac7/172.17.0.2:0 2024-12-11T20:46:39,040 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5be53b084ac7:40355 2024-12-11T20:46:39,044 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;5be53b084ac7:40781 2024-12-11T20:46:39,045 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:46:39,045 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:46:39,046 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T20:46:39,046 INFO [RS:2;5be53b084ac7:40781 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:46:39,046 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:46:39,047 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:46:39,047 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T20:46:39,047 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:46:39,047 INFO [RS:0;5be53b084ac7:40355 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:46:39,047 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:46:39,047 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:46:39,047 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:46:39,048 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5be53b084ac7:0, corePoolSize=5, maxPoolSize=5 2024-12-11T20:46:39,048 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5be53b084ac7:0, corePoolSize=10, maxPoolSize=10 2024-12-11T20:46:39,048 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:39,048 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:46:39,048 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:39,050 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=40355, startcode=1733949994820 2024-12-11T20:46:39,046 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-11T20:46:39,052 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5be53b084ac7:39865 2024-12-11T20:46:39,053 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T20:46:39,053 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=40781, startcode=1733949995244 2024-12-11T20:46:39,053 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T20:46:39,053 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T20:46:39,054 INFO [RS:1;5be53b084ac7:39865 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:46:39,054 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T20:46:39,063 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=39865, startcode=1733949995112 2024-12-11T20:46:39,071 DEBUG [RS:2;5be53b084ac7:40781 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:46:39,071 DEBUG [RS:0;5be53b084ac7:40355 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:46:39,074 DEBUG [RS:1;5be53b084ac7:39865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T20:46:39,081 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:46:39,082 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T20:46:39,087 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733950029087 2024-12-11T20:46:39,089 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T20:46:39,090 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T20:46:39,094 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:39,094 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T20:46:39,094 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T20:46:39,095 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T20:46:39,103 WARN [IPC Server handler 4 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T20:46:39,103 WARN [IPC Server handler 4 on default port 36717 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T20:46:39,104 WARN [IPC Server handler 4 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T20:46:39,107 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T20:46:39,107 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T20:46:39,123 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
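Editor's note: the descriptor printed above is the hbase:meta layout that InitMetaProcedure writes during bootstrap. Purely as an illustration of what those attribute strings mean, the sketch below rebuilds the 'info' family with the public client API; hbase:meta itself is created internally, not by user code.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)// DATA_BLOCK_ENCODING
            .setKeepDeletedCells(KeepDeletedCells.FALSE)         // KEEP_DELETED_CELLS => 'FALSE'
            .setTimeToLive(HConstants.FOREVER)                   // TTL => 'FOREVER'
            .setScope(0)                                         // REPLICATION_SCOPE => '0'
            .build();
        System.out.println(info);
      }
    }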
2024-12-11T20:46:39,143 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T20:46:39,146 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55179, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:46:39,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:46:39,149 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34285, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:46:39,151 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T20:46:39,152 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T20:46:39,155 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T20:46:39,156 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57097, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T20:46:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:46:39,159 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T20:46:39,160 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T20:46:39,162 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T20:46:39,162 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733949999162,5,FailOnTimeoutGroup] 2024-12-11T20:46:39,163 INFO [PEWorker-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:39,163 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T20:46:39,165 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T20:46:39,167 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733949999162,5,FailOnTimeoutGroup] 2024-12-11T20:46:39,167 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:39,168 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T20:46:39,169 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:39,170 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:39,170 WARN [IPC Server handler 0 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T20:46:39,170 WARN [IPC Server handler 0 on default port 36717 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T20:46:39,171 WARN [IPC Server handler 0 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T20:46:39,190 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T20:46:39,191 WARN [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
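Editor's note: the repeated "Failed to place enough replicas, still in need of 1 to reach 3" warnings only indicate that the default HDFS replication factor (3) exceeds the number of DataNodes in this mini cluster; judging by the addStoredBlock lines, only 127.0.0.1:45301 and 127.0.0.1:44295 report blocks here, so two DataNodes is an inferred figure. The master line above also spells out that storeFileRefCount-based region reopening stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0. A hedged sketch of both settings, with illustrative values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MiniClusterTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Match the replication factor to the (inferred) number of DataNodes
        // to silence the BlockPlacementPolicyDefault warnings in a test run.
        conf.setInt("dfs.replication", 2);
        // Threshold > 0 enables reopening regions with very high storeFileRefCount,
        // as the master log states; 3 is an arbitrary example value.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
      }
    }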
2024-12-11T20:46:39,191 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T20:46:39,191 WARN [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T20:46:39,191 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T20:46:39,191 WARN [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T20:46:39,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:46:39,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:46:39,213 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:39,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:46:39,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:46:39,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:39,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:39,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:46:39,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:46:39,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:39,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:39,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:46:39,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:46:39,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:39,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:39,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:46:39,253 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:46:39,253 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:39,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:39,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:46:39,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740 2024-12-11T20:46:39,264 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740 2024-12-11T20:46:39,281 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:46:39,282 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:46:39,284 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T20:46:39,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:46:39,292 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=39865, startcode=1733949995112 2024-12-11T20:46:39,293 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=40781, startcode=1733949995244 2024-12-11T20:46:39,293 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(2659): reportForDuty to master=5be53b084ac7,41779,1733949993169 with port=40355, startcode=1733949994820 2024-12-11T20:46:39,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,40781,1733949995244 2024-12-11T20:46:39,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:39,303 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:39,305 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74227584, jitterRate=0.10607719421386719}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:46:39,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733949999213Initializing all the Stores at 1733949999216 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'} at 1733949999216Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733949999223 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733949999223Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733949999223Cleaning up temporary data from old regions at 1733949999282 (+59 ms)Region opened successfully at 1733949999309 (+27 ms) 2024-12-11T20:46:39,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:46:39,309 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:46:39,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:46:39,309 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,40355,1733949994820 2024-12-11T20:46:39,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:46:39,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:46:39,309 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:39,310 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,40355,1733949994820 2024-12-11T20:46:39,310 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36717 2024-12-11T20:46:39,310 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:46:39,315 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5be53b084ac7,39865,1733949995112 2024-12-11T20:46:39,315 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T20:46:39,316 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733949999309Disabling compacts and flushes for region at 1733949999309Disabling writes for close at 1733949999309Writing region close event to WAL at 1733949999315 (+6 ms)Closed at 1733949999315 2024-12-11T20:46:39,316 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] master.ServerManager(517): Registering regionserver=5be53b084ac7,39865,1733949995112 2024-12-11T20:46:39,316 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:39,316 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36717 2024-12-11T20:46:39,316 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:46:39,322 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:46:39,322 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T20:46:39,323 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:39,323 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36717 2024-12-11T20:46:39,323 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T20:46:39,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T20:46:39,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:46:39,356 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:46:39,361 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T20:46:39,512 WARN [5be53b084ac7:41779 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T20:46:39,885 DEBUG [RS:1;5be53b084ac7:39865 {}] zookeeper.ZKUtil(111): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,39865,1733949995112 2024-12-11T20:46:39,885 DEBUG [RS:2;5be53b084ac7:40781 {}] zookeeper.ZKUtil(111): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,40781,1733949995244 2024-12-11T20:46:39,885 WARN [RS:1;5be53b084ac7:39865 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:46:39,885 WARN [RS:2;5be53b084ac7:40781 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
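Editor's note: the "No servers available; cannot place 1 unassigned regions" warning and the reportForDuty retries resolve themselves once the three region servers (RS:0, RS:1, RS:2) register and create their ephemeral znodes under /hbase/rs. The following is a minimal sketch of the kind of three-region-server test cluster this log comes from, assuming the 2.x utility class name; in the 4.0.0-alpha-1-SNAPSHOT line the class is named HBaseTestingUtil instead.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class ThreeRegionServerClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // One master plus three region servers, matching RS:0..RS:2 above.
        util.startMiniCluster(3);
        try {
          // Region servers report for duty, register ephemeral znodes under
          // /hbase/rs, and hbase:meta is assigned once one of them is available.
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }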
2024-12-11T20:46:39,885 INFO [RS:1;5be53b084ac7:39865 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:46:39,885 INFO [RS:2;5be53b084ac7:40781 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:46:39,885 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,39865,1733949995112 2024-12-11T20:46:39,885 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244 2024-12-11T20:46:39,887 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,40355,1733949994820] 2024-12-11T20:46:39,887 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,39865,1733949995112] 2024-12-11T20:46:39,887 DEBUG [RS:0;5be53b084ac7:40355 {}] zookeeper.ZKUtil(111): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5be53b084ac7,40355,1733949994820 2024-12-11T20:46:39,887 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5be53b084ac7,40781,1733949995244] 2024-12-11T20:46:39,887 WARN [RS:0;5be53b084ac7:40355 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T20:46:39,887 INFO [RS:0;5be53b084ac7:40355 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:46:39,888 DEBUG [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40355,1733949994820 2024-12-11T20:46:39,939 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:46:39,939 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:46:39,940 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T20:46:39,979 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:46:39,985 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:46:39,992 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T20:46:40,002 INFO [RS:0;5be53b084ac7:40355 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:46:40,002 INFO [RS:1;5be53b084ac7:39865 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 
MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:46:40,002 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,002 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,003 INFO [RS:2;5be53b084ac7:40781 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T20:46:40,003 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,005 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:46:40,015 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:46:40,016 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:46:40,016 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T20:46:40,031 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:46:40,035 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T20:46:40,037 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,037 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,037 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
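Editor's note: the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower, retuned every 60000 ms) and the MemStoreFlusher limits (880 M global, 836 M low mark) printed above come from region-server configuration. The sketch below uses what I believe are the standard property names for these settings; the key names are assumptions, not quoted from this log, and the memstore figures are expressed as the usual heap fractions (836/880 ≈ 0.95).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputAndMemstoreSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Compaction throughput bounds, in bytes per second, and the tuning period.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);
        // Global memstore limit as a fraction of heap, and the low-water mark
        // as a fraction of that limit.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
      }
    }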
2024-12-11T20:46:40,037 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,038 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,039 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,039 DEBUG [RS:0;5be53b084ac7:40355 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,039 DEBUG [RS:2;5be53b084ac7:40781 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,039 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5be53b084ac7:0, corePoolSize=2, maxPoolSize=2 2024-12-11T20:46:40,040 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5be53b084ac7:0, corePoolSize=1, maxPoolSize=1 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,041 DEBUG [RS:1;5be53b084ac7:39865 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5be53b084ac7:0, corePoolSize=3, maxPoolSize=3 2024-12-11T20:46:40,082 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,083 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,083 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,083 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
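Editor's note: each region server starts a fixed set of executor pools (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS, and so on) whose core/max sizes are printed above. Pool sizes are driven by per-service properties; the two keys in the sketch below are the ones commonly used for the open/close-region pools, but they are assumed names, not confirmed by this log, and corePoolSize=1 simply mirrors what this test run shows.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the RS_OPEN_REGION / RS_CLOSE_REGION pool sizes.
        conf.setInt("hbase.regionserver.executor.openregion.threads", 1);
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 1);
      }
    }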
2024-12-11T20:46:40,083 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,083 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,40355,1733949994820-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:46:40,096 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,096 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,097 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,097 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,097 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,097 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,39865,1733949995112-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:46:40,107 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,107 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,107 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,107 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,108 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,108 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,40781,1733949995244-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:46:40,173 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:46:40,175 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:46:40,175 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T20:46:40,175 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,40355,1733949994820-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,176 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T20:46:40,176 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.Replication(171): 5be53b084ac7,40355,1733949994820 started 2024-12-11T20:46:40,179 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,40781,1733949995244-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,180 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,180 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.Replication(171): 5be53b084ac7,40781,1733949995244 started 2024-12-11T20:46:40,184 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,39865,1733949995112-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,184 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,184 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.Replication(171): 5be53b084ac7,39865,1733949995112 started 2024-12-11T20:46:40,208 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,209 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,209 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,40355,1733949994820, RpcServer on 5be53b084ac7/172.17.0.2:40355, sessionid=0x10016aeea9b0001 2024-12-11T20:46:40,209 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,39865,1733949995112, RpcServer on 5be53b084ac7/172.17.0.2:39865, sessionid=0x10016aeea9b0002 2024-12-11T20:46:40,210 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:46:40,210 DEBUG [RS:0;5be53b084ac7:40355 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,40355,1733949994820 2024-12-11T20:46:40,210 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:46:40,210 DEBUG [RS:1;5be53b084ac7:39865 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,39865,1733949995112 2024-12-11T20:46:40,210 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,40355,1733949994820' 2024-12-11T20:46:40,210 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,39865,1733949995112' 2024-12-11T20:46:40,210 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:46:40,211 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:46:40,212 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:46:40,219 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(53): 
Procedure flush-table-proc started 2024-12-11T20:46:40,219 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:46:40,220 DEBUG [RS:0;5be53b084ac7:40355 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,40355,1733949994820 2024-12-11T20:46:40,220 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,40355,1733949994820' 2024-12-11T20:46:40,220 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:46:40,222 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:40,222 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1482): Serving as 5be53b084ac7,40781,1733949995244, RpcServer on 5be53b084ac7/172.17.0.2:40781, sessionid=0x10016aeea9b0003 2024-12-11T20:46:40,223 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T20:46:40,223 DEBUG [RS:2;5be53b084ac7:40781 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5be53b084ac7,40781,1733949995244 2024-12-11T20:46:40,223 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:46:40,223 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,40781,1733949995244' 2024-12-11T20:46:40,223 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T20:46:40,228 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:46:40,228 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:46:40,228 DEBUG [RS:1;5be53b084ac7:39865 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5be53b084ac7,39865,1733949995112 2024-12-11T20:46:40,228 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,39865,1733949995112' 2024-12-11T20:46:40,228 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:46:40,228 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T20:46:40,229 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:46:40,231 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T20:46:40,231 DEBUG [RS:1;5be53b084ac7:39865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:46:40,231 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T20:46:40,231 DEBUG [RS:2;5be53b084ac7:40781 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
5be53b084ac7,40781,1733949995244 2024-12-11T20:46:40,231 INFO [RS:1;5be53b084ac7:39865 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:46:40,231 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5be53b084ac7,40781,1733949995244' 2024-12-11T20:46:40,231 INFO [RS:1;5be53b084ac7:39865 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:46:40,231 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T20:46:40,231 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:46:40,233 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T20:46:40,234 DEBUG [RS:0;5be53b084ac7:40355 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:46:40,234 DEBUG [RS:2;5be53b084ac7:40781 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T20:46:40,234 INFO [RS:0;5be53b084ac7:40355 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:46:40,234 INFO [RS:2;5be53b084ac7:40781 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T20:46:40,234 INFO [RS:0;5be53b084ac7:40355 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T20:46:40,234 INFO [RS:2;5be53b084ac7:40781 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
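Editor's note: the "Quota support disabled" lines reflect the default setting, which is why neither the RPC quota manager nor the space quota manager starts on any of the three region servers. A minimal sketch of the switch that changes this, set before cluster startup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enables RegionServerRpcQuotaManager and the space quota manager at startup.
        conf.setBoolean("hbase.quota.enabled", true);
      }
    }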
2024-12-11T20:46:40,339 INFO [RS:0;5be53b084ac7:40355 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:46:40,339 INFO [RS:1;5be53b084ac7:39865 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:46:40,340 INFO [RS:2;5be53b084ac7:40781 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T20:46:40,345 INFO [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C40781%2C1733949995244, suffix=, logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244, archiveDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs, maxLogs=32 2024-12-11T20:46:40,346 INFO [RS:0;5be53b084ac7:40355 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C40355%2C1733949994820, suffix=, logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40355,1733949994820, archiveDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs, maxLogs=32 2024-12-11T20:46:40,354 INFO [RS:1;5be53b084ac7:39865 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C39865%2C1733949995112, suffix=, logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,39865,1733949995112, archiveDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs, maxLogs=32 2024-12-11T20:46:40,377 DEBUG [RS:2;5be53b084ac7:40781 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244/5be53b084ac7%2C40781%2C1733949995244.1733950000356, exclude list is [], retry=0 2024-12-11T20:46:40,389 DEBUG [RS:0;5be53b084ac7:40355 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40355,1733949994820/5be53b084ac7%2C40355%2C1733949994820.1733950000352, exclude list is [], retry=0 2024-12-11T20:46:40,396 WARN [IPC Server handler 3 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T20:46:40,396 WARN [IPC Server handler 3 on default port 36717 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T20:46:40,397 WARN [IPC Server handler 3 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, 
newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T20:46:40,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK] 2024-12-11T20:46:40,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46705,DS-f27da252-d217-4063-b080-6d805699a8f4,DISK] 2024-12-11T20:46:40,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK] 2024-12-11T20:46:40,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK] 2024-12-11T20:46:40,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK] 2024-12-11T20:46:40,414 DEBUG [RS:1;5be53b084ac7:39865 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,39865,1733949995112/5be53b084ac7%2C39865%2C1733949995112.1733950000355, exclude list is [], retry=0 2024-12-11T20:46:40,428 WARN [IPC Server handler 3 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T20:46:40,428 WARN [IPC Server handler 3 on default port 36717 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T20:46:40,428 WARN [IPC Server handler 3 on default port 36717 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]} 2024-12-11T20:46:40,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK] 2024-12-11T20:46:40,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK] 2024-12-11T20:46:40,481 INFO [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244/5be53b084ac7%2C40781%2C1733949995244.1733950000356 2024-12-11T20:46:40,488 DEBUG [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46505:46505),(127.0.0.1/127.0.0.1:37781:37781)] 2024-12-11T20:46:40,512 INFO [RS:1;5be53b084ac7:39865 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,39865,1733949995112/5be53b084ac7%2C39865%2C1733949995112.1733950000355 2024-12-11T20:46:40,513 WARN [5be53b084ac7:41779 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-11T20:46:40,543 DEBUG [RS:1;5be53b084ac7:39865 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46505:46505),(127.0.0.1/127.0.0.1:37781:37781)] 2024-12-11T20:46:40,555 INFO [RS:0;5be53b084ac7:40355 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40355,1733949994820/5be53b084ac7%2C40355%2C1733949994820.1733950000352 2024-12-11T20:46:40,559 DEBUG [RS:0;5be53b084ac7:40355 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33225:33225),(127.0.0.1/127.0.0.1:46505:46505),(127.0.0.1/127.0.0.1:37781:37781)] 2024-12-11T20:46:40,766 DEBUG [5be53b084ac7:41779 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T20:46:40,778 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:46:40,797 INFO [5be53b084ac7:41779 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:46:40,797 INFO [5be53b084ac7:41779 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:46:40,797 INFO [5be53b084ac7:41779 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:46:40,797 DEBUG [5be53b084ac7:41779 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:46:40,820 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:40,839 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,40781,1733949995244, state=OPENING 2024-12-11T20:46:40,891 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T20:46:40,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:40,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:40,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:40,907 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:40,907 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:40,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:40,909 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T20:46:40,911 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:40,913 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:40,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:46:41,141 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:41,146 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47085, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:41,184 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T20:46:41,184 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T20:46:41,185 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T20:46:41,211 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5be53b084ac7%2C40781%2C1733949995244.meta, suffix=.meta, logDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244, archiveDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs, maxLogs=32 2024-12-11T20:46:41,260 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244/5be53b084ac7%2C40781%2C1733949995244.meta.1733950001214.meta, exclude list is [], retry=0 2024-12-11T20:46:41,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK] 2024-12-11T20:46:41,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46705,DS-f27da252-d217-4063-b080-6d805699a8f4,DISK] 2024-12-11T20:46:41,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK] 2024-12-11T20:46:41,321 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/WALs/5be53b084ac7,40781,1733949995244/5be53b084ac7%2C40781%2C1733949995244.meta.1733950001214.meta 2024-12-11T20:46:41,330 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37781:37781),(127.0.0.1/127.0.0.1:33225:33225),(127.0.0.1/127.0.0.1:46505:46505)] 2024-12-11T20:46:41,331 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:46:41,332 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-11T20:46:41,334 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:46:41,335 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T20:46:41,338 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T20:46:41,341 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T20:46:41,356 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T20:46:41,356 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:41,357 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T20:46:41,357 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T20:46:41,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T20:46:41,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T20:46:41,382 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:41,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:41,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T20:46:41,385 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T20:46:41,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:41,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:41,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T20:46:41,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T20:46:41,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:41,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:41,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T20:46:41,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T20:46:41,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:41,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T20:46:41,420 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T20:46:41,422 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740 2024-12-11T20:46:41,427 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740 2024-12-11T20:46:41,433 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T20:46:41,433 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T20:46:41,435 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
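The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for hbase:meta, so the per-family flush lower bound falls back to the region memstore flush size divided by the number of column families; 32 MB is consistent with the default 128 MB flush size split across the four meta families (info, ns, rep_barrier, table). A minimal sketch of that fallback arithmetic, assuming the default flush size; the class and variable names are invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of the fallback described by the FlushLargeStoresPolicy entry above.
    // The property names are the ones quoted in the log; the 128 MB default and the
    // family count of 4 are assumptions based on this cluster's hbase:meta schema.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int families = 4; // info, ns, rep_barrier, table
        long lowerBound = conf.getLong(
            "hbase.hregion.percolumnfamilyflush.size.lower.bound", flushSize / families);
        System.out.println("per-family flush lower bound = " + (lowerBound / (1024 * 1024)) + " MB");
      }
    }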
2024-12-11T20:46:41,439 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T20:46:41,442 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61018218, jitterRate=-0.09075769782066345}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T20:46:41,442 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T20:46:41,446 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733950001357Writing region info on filesystem at 1733950001358 (+1 ms)Initializing all the Stores at 1733950001360 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733950001360Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733950001379 (+19 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950001379Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733950001379Cleaning up temporary data from old regions at 1733950001433 (+54 ms)Running coprocessor post-open hooks at 1733950001442 (+9 ms)Region opened successfully at 1733950001446 (+4 ms) 2024-12-11T20:46:41,470 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733950001127 2024-12-11T20:46:41,494 DEBUG [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T20:46:41,501 INFO [RS_OPEN_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T20:46:41,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:41,524 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5be53b084ac7,40781,1733949995244, state=OPEN 2024-12-11T20:46:41,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:46:41,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:46:41,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:41,552 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:41,553 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:41,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:46:41,567 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:41,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T20:46:41,570 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T20:46:41,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T20:46:41,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5be53b084ac7,40781,1733949995244 in 636 msec 2024-12-11T20:46:41,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T20:46:41,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 2.2470 sec 2024-12-11T20:46:41,608 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T20:46:41,609 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T20:46:41,658 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:41,663 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:41,744 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:41,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:41,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 2.9850 sec 2024-12-11T20:46:41,811 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733950001811, completionTime=-1 2024-12-11T20:46:41,815 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T20:46:41,815 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T20:46:41,939 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T20:46:41,939 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733950061939 2024-12-11T20:46:41,940 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733950121940 2024-12-11T20:46:41,940 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 124 msec 2024-12-11T20:46:41,952 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:46:41,968 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:41,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:41,970 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:41,974 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5be53b084ac7:41779, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:41,980 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:41,992 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
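At this point the master has published the hbase:meta location (on 5be53b084ac7,40781) to the /hbase/meta-region-server znode and clients are resolving it through the connection registry. As a hedged illustration, the sketch below resolves the same location through the public client API against this test cluster; the ZooKeeper host and port are assumptions copied from the ZKWatcher entries in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Sketch: look up the hbase:meta location that the registry entries above return.
    // The ZooKeeper host/port values are assumptions taken from this log.
    public class LocateMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 57311);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }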
2024-12-11T20:46:42,000 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T20:46:42,083 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 6.493sec 2024-12-11T20:46:42,097 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T20:46:42,099 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T20:46:42,101 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T20:46:42,102 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T20:46:42,102 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T20:46:42,103 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T20:46:42,104 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T20:46:42,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19e7eef7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:42,171 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T20:46:42,171 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T20:46:42,185 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:46:42,186 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T20:46:42,186 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 5be53b084ac7,41779,1733949993169 2024-12-11T20:46:42,189 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:46:42,189 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5cf259d2 2024-12-11T20:46:42,191 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T20:46:42,194 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43717, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T20:46:42,202 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T20:46:42,227 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:46:42,231 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:46:42,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-11T20:46:42,231 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:46:42,232 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@464006d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:42,232 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:46:42,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:46:42,236 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:42,236 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:46:42,236 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-11T20:46:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:46:42,248 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:46:42,253 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:46:42,253 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:42,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166ddd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:42,257 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start 
fetching meta region location from registry 2024-12-11T20:46:42,268 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:42,272 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:42,297 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:42,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5be53b084ac7,41779,1733949993169 2024-12-11T20:46:42,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/test.cache.data in system properties and HBase conf 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir in system properties and HBase conf 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T20:46:42,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T20:46:42,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/nfs.dump.dir in system properties and HBase conf 2024-12-11T20:46:42,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir in system properties and HBase conf 2024-12-11T20:46:42,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T20:46:42,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T20:46:42,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/fs.s3a.committer.staging.tmp.path in system properties and 
HBase conf 2024-12-11T20:46:42,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:46:42,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741837_1013 (size=349) 2024-12-11T20:46:42,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741837_1013 (size=349) 2024-12-11T20:46:42,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741837_1013 (size=349) 2024-12-11T20:46:42,444 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 92982b2478254451b467ff12e9fe4706, NAME => 'hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:42,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741838_1014 (size=36) 2024-12-11T20:46:42,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741838_1014 (size=36) 2024-12-11T20:46:42,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741838_1014 (size=36) 2024-12-11T20:46:42,576 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:42,577 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 92982b2478254451b467ff12e9fe4706, disabling compactions & flushes 2024-12-11T20:46:42,577 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:46:42,577 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:46:42,577 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. after waiting 0 ms 2024-12-11T20:46:42,577 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:46:42,577 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 
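The CreateTableProcedure entries above carry the full descriptor used for hbase:acl: a single column family 'l' with one version, in-memory, 8 KB blocks, and no bloom filter. For reference, a client-side sketch that builds an equivalent descriptor with the public admin API; it only mirrors the attributes quoted in this log and is not the code path the master's procedure actually runs.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a descriptor equivalent to the 'hbase:acl' table created above,
    // mirroring only the attributes quoted in this log (family 'l', VERSIONS=1,
    // IN_MEMORY=true, BLOCKSIZE=8192, BLOOMFILTER=NONE).
    public class AclDescriptorSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor l = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("l"))
            .setMaxVersions(1)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.NONE)
            .build();
        TableDescriptor acl = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("hbase", "acl"))
            .setColumnFamily(l)
            .build();
        System.out.println(acl);
      }
    }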
2024-12-11T20:46:42,577 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 92982b2478254451b467ff12e9fe4706: Waiting for close lock at 1733950002577Disabling compacts and flushes for region at 1733950002577Disabling writes for close at 1733950002577Writing region close event to WAL at 1733950002577Closed at 1733950002577 2024-12-11T20:46:42,584 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:46:42,592 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733950002585"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950002585"}]},"ts":"1733950002585"} 2024-12-11T20:46:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:46:42,617 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T20:46:42,623 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:46:42,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950002623"}]},"ts":"1733950002623"} 2024-12-11T20:46:42,636 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-11T20:46:42,637 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:46:42,639 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:46:42,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:46:42,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:46:42,640 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:46:42,640 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:46:42,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=92982b2478254451b467ff12e9fe4706, ASSIGN}] 2024-12-11T20:46:42,650 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=92982b2478254451b467ff12e9fe4706, ASSIGN 2024-12-11T20:46:42,653 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=92982b2478254451b467ff12e9fe4706, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:46:42,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741839_1015 (size=592039) 2024-12-11T20:46:42,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741839_1015 (size=592039) 2024-12-11T20:46:42,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741839_1015 (size=592039) 2024-12-11T20:46:42,809 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T20:46:42,810 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=92982b2478254451b467ff12e9fe4706, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:46:42,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=92982b2478254451b467ff12e9fe4706, ASSIGN because future has completed 2024-12-11T20:46:42,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92982b2478254451b467ff12e9fe4706, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:46:42,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T20:46:42,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T20:46:42,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T20:46:42,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:46:43,031 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:43,083 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34419, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:43,122 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 
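While the CreateTableProcedure (pid=4) runs, the master keeps logging "Checking to see if procedure is done pid=4" on behalf of the blocked client call. A rough client-side counterpart, sketched here with the standard Admin API and a hypothetical polling loop (a synchronous createTable already does this wait internally):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForAcl {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName acl = TableName.valueOf("hbase", "acl");
          // Poll until the master reports the table's regions as assigned and online,
          // roughly the client-visible counterpart of the "procedure is done" checks above.
          while (!admin.isTableAvailable(acl)) {
            Thread.sleep(100);
          }
          System.out.println("hbase:acl is online");
        }
      }
    }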
2024-12-11T20:46:43,123 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 92982b2478254451b467ff12e9fe4706, NAME => 'hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:46:43,123 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. service=AccessControlService 2024-12-11T20:46:43,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:46:43,124 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,124 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:43,124 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,125 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,135 INFO [StoreOpener-92982b2478254451b467ff12e9fe4706-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,150 INFO [StoreOpener-92982b2478254451b467ff12e9fe4706-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92982b2478254451b467ff12e9fe4706 columnFamilyName l 2024-12-11T20:46:43,150 DEBUG [StoreOpener-92982b2478254451b467ff12e9fe4706-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:43,153 INFO [StoreOpener-92982b2478254451b467ff12e9fe4706-1 {}] regionserver.HStore(327): Store=92982b2478254451b467ff12e9fe4706/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:43,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,158 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,159 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,161 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,161 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,179 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:43,205 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 92982b2478254451b467ff12e9fe4706; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62184632, jitterRate=-0.07337677478790283}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:46:43,206 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:46:43,210 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 92982b2478254451b467ff12e9fe4706: Running coprocessor pre-open hook at 1733950003125Writing region info on filesystem at 1733950003125Initializing all the Stores at 1733950003128 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733950003128Cleaning up temporary data from old regions at 1733950003161 (+33 ms)Running coprocessor post-open hooks at 1733950003206 (+45 ms)Region opened successfully at 1733950003210 (+4 ms) 
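The CompactionConfiguration(183) line above prints the effective compaction settings for the store (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0). A sketch of where such values are normally set, assuming the usual hbase.hstore.compaction.* configuration keys (the key names are an assumption on my part, not something taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Likely sources of the values printed by CompactionConfiguration above.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }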
2024-12-11T20:46:43,217 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., pid=6, masterSystemTime=1733950003029 2024-12-11T20:46:43,243 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:46:43,243 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:46:43,244 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=92982b2478254451b467ff12e9fe4706, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:46:43,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92982b2478254451b467ff12e9fe4706, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:46:43,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T20:46:43,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 92982b2478254451b467ff12e9fe4706, server=5be53b084ac7,39865,1733949995112 in 448 msec 2024-12-11T20:46:43,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T20:46:43,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=92982b2478254451b467ff12e9fe4706, ASSIGN in 642 msec 2024-12-11T20:46:43,311 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:46:43,312 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950003311"}]},"ts":"1733950003311"} 2024-12-11T20:46:43,317 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-11T20:46:43,320 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:46:43,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.1140 sec 2024-12-11T20:46:43,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T20:46:43,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-11T20:46:43,436 DEBUG [master/5be53b084ac7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup 
initialization complete, took 0 seconds 2024-12-11T20:46:43,437 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T20:46:43,437 INFO [master/5be53b084ac7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5be53b084ac7,41779,1733949993169-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T20:46:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741832_1008 (size=32) 2024-12-11T20:46:43,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741831_1007 (size=1321) 2024-12-11T20:46:43,983 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:46:43,984 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-11T20:46:43,985 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T20:46:43,985 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T20:46:43,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T20:46:43,997 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-11T20:46:43,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:46:44,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-11T20:46:44,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T20:46:44,000 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-11T20:46:44,000 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:46:44,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-11T20:46:44,001 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:46:44,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T20:46:44,001 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T20:46:44,001 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T20:46:45,915 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:45,930 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-11T20:46:45,936 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T20:46:46,052 WARN [Thread-379 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:46,644 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:46:46,654 WARN [Thread-379 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
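The recurring AuthenticationFilter(240) warnings only mean the HTTP auth signature secret file (/home/jenkins/hadoop-http-auth-signature-secret) does not exist, so the filter falls back to random secrets; that is harmless in a throwaway test cluster. A sketch of how a real deployment would point the filter at a secret file, assuming the standard hadoop.http.authentication.signature.secret.file property and a hypothetical path:

    import org.apache.hadoop.conf.Configuration;

    public class HttpAuthSecretSketch {
      public static Configuration withSecretFile(String path) {
        Configuration conf = new Configuration();
        // Point the HTTP authentication filter at an existing secret file so it does not
        // fall back to random secrets (the path argument here is purely illustrative).
        conf.set("hadoop.http.authentication.signature.secret.file", path);
        return conf;
      }
    }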
2024-12-11T20:46:46,655 INFO [Thread-379 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:46:46,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:46:46,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:46:46,767 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:46:46,811 INFO [Thread-379 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:46:46,811 INFO [Thread-379 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:46:46,811 INFO [Thread-379 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:46:46,816 INFO [Thread-379 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@240d436c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:46:46,816 INFO [Thread-379 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fa11621{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T20:46:46,827 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:46,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77cab988{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:46:46,846 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bcfbfc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T20:46:46,955 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:46:47,222 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-11T20:46:47,233 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-11T20:46:47,233 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T20:46:47,236 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T20:46:47,392 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope 
"Singleton" 2024-12-11T20:46:47,728 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:48,189 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-11T20:46:48,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62a90d5c{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-33861-hadoop-yarn-common-3_4_1_jar-_-any-2339942130569725535/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T20:46:48,220 INFO [Thread-379 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e64743c{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-38147-hadoop-yarn-common-3_4_1_jar-_-any-16774482681960539311/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T20:46:48,223 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2239a866{HTTP/1.1, (http/1.1)}{localhost:33861} 2024-12-11T20:46:48,223 INFO [Thread-379 {}] server.AbstractConnector(333): Started ServerConnector@5a3501ea{HTTP/1.1, (http/1.1)}{localhost:38147} 2024-12-11T20:46:48,223 INFO [Time-limited test {}] server.Server(415): Started @27208ms 2024-12-11T20:46:48,223 INFO [Thread-379 {}] server.Server(415): Started @27208ms 2024-12-11T20:46:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741841_1017 (size=5) 2024-12-11T20:46:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741841_1017 (size=5) 2024-12-11T20:46:48,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741841_1017 (size=5) 2024-12-11T20:46:50,283 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T20:46:50,288 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:50,334 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-11T20:46:50,344 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:46:50,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:46:50,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:46:50,404 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T20:46:50,411 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:50,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bfc944e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:46:50,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71b0e2cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T20:46:50,552 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T20:46:50,552 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T20:46:50,553 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T20:46:50,554 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T20:46:50,585 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:50,616 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:51,013 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:51,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@174cbe06{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-39707-hadoop-yarn-common-3_4_1_jar-_-any-10715411769300443434/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T20:46:51,063 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@52e9365b{HTTP/1.1, (http/1.1)}{localhost:39707} 2024-12-11T20:46:51,063 INFO [Time-limited test {}] server.Server(415): Started @30049ms 2024-12-11T20:46:51,587 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T20:46:51,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:51,634 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T20:46:51,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T20:46:51,647 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T20:46:51,647 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T20:46:51,647 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T20:46:51,649 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T20:46:51,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30a89850{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,AVAILABLE} 2024-12-11T20:46:51,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c2f3736{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T20:46:51,723 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T20:46:51,724 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T20:46:51,724 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T20:46:51,724 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T20:46:51,735 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:51,742 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:51,913 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T20:46:51,919 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3dcf889b{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/java.io.tmpdir/jetty-localhost-42181-hadoop-yarn-common-3_4_1_jar-_-any-3198401534511232170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T20:46:51,920 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21af667e{HTTP/1.1, (http/1.1)}{localhost:42181} 2024-12-11T20:46:51,921 INFO [Time-limited test {}] server.Server(415): Started @30906ms 2024-12-11T20:46:51,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-11T20:46:51,957 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:46:51,989 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=714, OpenFileDescriptor=763, MaxFileDescriptor=1048576, SystemLoadAverage=759, ProcessCount=11, AvailableMemoryMB=4468 2024-12-11T20:46:51,992 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=714 is superior to 500 2024-12-11T20:46:51,998 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T20:46:52,008 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5be53b084ac7,41779,1733949993169 2024-12-11T20:46:52,008 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14d663cd 2024-12-11T20:46:52,008 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T20:46:52,014 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T20:46:52,015 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:46:52,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:52,024 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:46:52,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-11T20:46:52,026 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:52,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:46:52,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:46:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741842_1018 (size=422) 2024-12-11T20:46:52,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741842_1018 (size=422) 2024-12-11T20:46:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741842_1018 (size=422) 2024-12-11T20:46:52,090 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d4b8a9e23f55a7aec4663a69038062bc, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:52,098 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 870517e9f0b0a069255c252b508eb39d, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:52,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:46:52,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741844_1020 (size=83) 2024-12-11T20:46:52,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741844_1020 (size=83) 2024-12-11T20:46:52,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741844_1020 (size=83) 2024-12-11T20:46:52,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741843_1019 (size=83) 2024-12-11T20:46:52,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741843_1019 (size=83) 2024-12-11T20:46:52,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741843_1019 (size=83) 2024-12-11T20:46:52,192 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:52,192 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 870517e9f0b0a069255c252b508eb39d, disabling compactions & flushes 2024-12-11T20:46:52,192 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:52,193 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:52,193 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. after waiting 0 ms 2024-12-11T20:46:52,193 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:52,193 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 
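The create request above defines testtb-testExportFileSystemStateWithSplitRegion with REGION_REPLICATION => '1', family 'cf' (one version, ROW bloom filter), and two regions split at key '1' (STARTKEY '' to '1', and '1' to ''). A client-side sketch of an equivalent create through the public Admin API with one explicit split key:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTableSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
        // One split key means two regions: ['', '1') and ['1', ''), matching the log above.
        byte[][] splits = new byte[][] { Bytes.toBytes("1") };
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc, splits);
        }
      }
    }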
2024-12-11T20:46:52,193 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 870517e9f0b0a069255c252b508eb39d: Waiting for close lock at 1733950012192Disabling compacts and flushes for region at 1733950012192Disabling writes for close at 1733950012193 (+1 ms)Writing region close event to WAL at 1733950012193Closed at 1733950012193 2024-12-11T20:46:52,195 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:52,195 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing d4b8a9e23f55a7aec4663a69038062bc, disabling compactions & flushes 2024-12-11T20:46:52,195 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:52,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:52,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. after waiting 0 ms 2024-12-11T20:46:52,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:52,196 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
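The region close journals print bare epoch-millisecond timestamps (e.g. "Waiting for close lock at 1733950012192"). Converting one shows it lines up with the surrounding wall-clock timestamps, which in this log appear to be UTC:

    import java.time.Instant;

    public class JournalTimestamp {
      public static void main(String[] args) {
        // 1733950012192 is one of the journal timestamps above; it converts to
        // 2024-12-11T20:46:52.192Z, matching the adjacent log lines.
        System.out.println(Instant.ofEpochMilli(1733950012192L));
      }
    }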
2024-12-11T20:46:52,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for d4b8a9e23f55a7aec4663a69038062bc: Waiting for close lock at 1733950012195Disabling compacts and flushes for region at 1733950012195Disabling writes for close at 1733950012196 (+1 ms)Writing region close event to WAL at 1733950012196Closed at 1733950012196 2024-12-11T20:46:52,201 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:46:52,201 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733950012201"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950012201"}]},"ts":"1733950012201"} 2024-12-11T20:46:52,202 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733950012201"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950012201"}]},"ts":"1733950012201"} 2024-12-11T20:46:52,275 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:46:52,278 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:46:52,278 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950012278"}]},"ts":"1733950012278"} 2024-12-11T20:46:52,284 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T20:46:52,285 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:46:52,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:46:52,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:46:52,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:46:52,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:46:52,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:46:52,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, ASSIGN}] 2024-12-11T20:46:52,292 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, ASSIGN 2024-12-11T20:46:52,293 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, ASSIGN 2024-12-11T20:46:52,295 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:46:52,296 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:46:52,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:46:52,463 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
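The balancer output above spreads the two new regions round-robin across the three region servers (all on one host and one rack), ending up on ports 40355 and 40781. A sketch of how a client could confirm the placement once the table is online, using the standard RegionLocator API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionPlacementSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(name)) {
          // Should print two locations, e.g. the servers on ports 40355 and 40781 assigned above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }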
2024-12-11T20:46:52,464 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=870517e9f0b0a069255c252b508eb39d, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:52,465 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d4b8a9e23f55a7aec4663a69038062bc, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:46:52,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, ASSIGN because future has completed 2024-12-11T20:46:52,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, ASSIGN because future has completed 2024-12-11T20:46:52,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:46:52,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:46:52,660 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:46:52,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:52,710 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:52,711 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 870517e9f0b0a069255c252b508eb39d, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:46:52,711 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. service=AccessControlService 2024-12-11T20:46:52,711 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
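The region opens above register the AccessControlService coprocessor endpoint, which is what the secure export-snapshot test exercises. A sketch of granting a user read access on the test table through that service, assuming the AccessControlClient.grant overload that takes table, family and qualifier (the user name here is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {
        TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grant READ on the whole table (null family/qualifier) to a hypothetical user.
          AccessControlClient.grant(conn, name, "some_user", null, null, Permission.Action.READ);
        }
      }
    }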
2024-12-11T20:46:52,712 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,712 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:52,712 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,712 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,716 INFO [StoreOpener-870517e9f0b0a069255c252b508eb39d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,720 INFO [StoreOpener-870517e9f0b0a069255c252b508eb39d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 870517e9f0b0a069255c252b508eb39d columnFamilyName cf 2024-12-11T20:46:52,720 DEBUG [StoreOpener-870517e9f0b0a069255c252b508eb39d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:52,720 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:52,721 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => d4b8a9e23f55a7aec4663a69038062bc, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:46:52,721 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
service=AccessControlService 2024-12-11T20:46:52,721 INFO [StoreOpener-870517e9f0b0a069255c252b508eb39d-1 {}] regionserver.HStore(327): Store=870517e9f0b0a069255c252b508eb39d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:52,721 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:46:52,721 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,722 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,722 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:52,723 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,723 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,725 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,727 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,730 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,730 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,731 INFO [StoreOpener-d4b8a9e23f55a7aec4663a69038062bc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,735 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,740 INFO 
[StoreOpener-d4b8a9e23f55a7aec4663a69038062bc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d4b8a9e23f55a7aec4663a69038062bc columnFamilyName cf 2024-12-11T20:46:52,740 DEBUG [StoreOpener-d4b8a9e23f55a7aec4663a69038062bc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:52,751 INFO [StoreOpener-d4b8a9e23f55a7aec4663a69038062bc-1 {}] regionserver.HStore(327): Store=d4b8a9e23f55a7aec4663a69038062bc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:52,754 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,760 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,761 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,767 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,768 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:52,769 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,770 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 870517e9f0b0a069255c252b508eb39d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61776469, jitterRate=-0.07945887744426727}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:46:52,770 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:52,771 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 870517e9f0b0a069255c252b508eb39d: Running coprocessor pre-open hook at 1733950012713Writing region info on filesystem at 1733950012713Initializing all the Stores at 1733950012715 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950012715Cleaning up temporary data from old regions at 1733950012730 (+15 ms)Running coprocessor post-open hooks at 1733950012770 (+40 ms)Region opened successfully at 1733950012771 (+1 ms) 2024-12-11T20:46:52,779 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d., pid=10, masterSystemTime=1733950012665 2024-12-11T20:46:52,785 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,789 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:52,789 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 
2024-12-11T20:46:52,791 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=870517e9f0b0a069255c252b508eb39d, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:46:52,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:46:52,804 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:52,806 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened d4b8a9e23f55a7aec4663a69038062bc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59703593, jitterRate=-0.11034713685512543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:46:52,807 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:52,807 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for d4b8a9e23f55a7aec4663a69038062bc: Running coprocessor pre-open hook at 1733950012723Writing region info on filesystem at 1733950012723Initializing all the Stores at 1733950012728 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950012728Cleaning up temporary data from old regions at 1733950012769 (+41 ms)Running coprocessor post-open hooks at 1733950012807 (+38 ms)Region opened successfully at 1733950012807 2024-12-11T20:46:52,813 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc., pid=11, masterSystemTime=1733950012660 2024-12-11T20:46:52,821 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:52,821 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
2024-12-11T20:46:52,828 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d4b8a9e23f55a7aec4663a69038062bc, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:46:52,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-11T20:46:52,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244 in 329 msec 2024-12-11T20:46:52,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:46:52,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, ASSIGN in 546 msec 2024-12-11T20:46:52,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T20:46:52,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, ASSIGN in 578 msec 2024-12-11T20:46:52,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-11T20:46:52,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820 in 353 msec 2024-12-11T20:46:52,880 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:46:52,880 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950012880"}]},"ts":"1733950012880"} 2024-12-11T20:46:52,893 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T20:46:52,896 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:46:52,902 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T20:46:52,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:46:52,924 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:52,926 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44039, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:52,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:52,934 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:52,934 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:52,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53099, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-11T20:46:52,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:46:52,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:52,945 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49769, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-11T20:46:52,948 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T20:46:53,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:53,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:53,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:46:53,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,025 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:53,026 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:53,026 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:53,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:53,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 1.0120 sec 2024-12-11T20:46:53,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T20:46:53,192 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:46:53,193 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-12-11T20:46:53,194 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:46:53,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 
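Editor's note: the entries above trace the master's CreateTableProcedure (pid=7) for testtb-testExportFileSystemStateWithSplitRegion through region assignment, ACL propagation over ZooKeeper, and completion of the CREATE operation. For orientation, here is a minimal client-side sketch of how such a pre-split table could be created; the table name, the 'cf' column family, and the single split key '1' (which yields the two regions ''..'1' and '1'..'' opened above) are taken from the log, while the class name and connection setup are illustrative assumptions, not the actual test code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    // Connection settings are read from hbase-site.xml on the classpath.
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // Single column family 'cf', matching the store shown in the open-region entries.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key '1' yields two regions: (''..'1') and ('1'..'').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      // The synchronous createTable call returns once the master's
      // CreateTableProcedure has finished, as logged above.
      admin.createTable(desc, splitKeys);
    }
  }
}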
2024-12-11T20:46:53,202 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:46:53,203 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-12-11T20:46:53,207 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:46:53,226 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:46:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950013227 (current time:1733950013227). 2024-12-11T20:46:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:46:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T20:46:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:46:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43203965, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:46:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:46:53,232 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:46:53,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:46:53,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:46:53,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328e794d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:46:53,233 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:46:53,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,234 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:46:53,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be59255, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:53,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:53,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:53,240 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:53,242 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:46:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:46:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,253 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:46:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b9d565b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:46:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:46:53,256 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:46:53,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:46:53,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:46:53,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@241962b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:46:53,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:46:53,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,259 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:46:53,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@494aaf65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:53,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:53,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:53,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:53,265 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:53,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:46:53,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:53,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:53,273 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:46:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:46:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:46:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T20:46:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
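Editor's note: at this point the master has validated the snapshot request ({ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) and proceeds to run SnapshotProcedure pid=12. A minimal sketch of how a client could issue an equivalent FLUSH-type snapshot through the public Admin API follows; the snapshot and table names come from the log, while the class name and connection setup are illustrative assumptions rather than the test's own code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // FLUSH-type snapshot, as in the "type=FLUSH ttl=0" request logged above.
      // The call blocks until the SnapshotProcedure completes on the master.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportFileSystemStateWithSplitRegion", tn, SnapshotType.FLUSH));
    }
  }
}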
2024-12-11T20:46:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:46:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T20:46:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T20:46:53,289 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:46:53,296 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:46:53,313 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:46:53,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741845_1021 (size=215) 2024-12-11T20:46:53,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741845_1021 (size=215) 2024-12-11T20:46:53,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741845_1021 (size=215) 2024-12-11T20:46:53,353 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:46:53,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d}] 2024-12-11T20:46:53,361 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:53,361 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T20:46:53,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-11T20:46:53,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-11T20:46:53,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:53,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for d4b8a9e23f55a7aec4663a69038062bc: 2024-12-11T20:46:53,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:53,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T20:46:53,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 870517e9f0b0a069255c252b508eb39d: 2024-12-11T20:46:53,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T20:46:53,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:46:53,545 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:46:53,549 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:46:53,549 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:46:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T20:46:53,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741846_1022 (size=86) 2024-12-11T20:46:53,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741846_1022 (size=86) 2024-12-11T20:46:53,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741846_1022 (size=86) 2024-12-11T20:46:53,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 
2024-12-11T20:46:53,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-11T20:46:53,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-11T20:46:53,642 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:53,642 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:53,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741847_1023 (size=86) 2024-12-11T20:46:53,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741847_1023 (size=86) 2024-12-11T20:46:53,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741847_1023 (size=86) 2024-12-11T20:46:53,664 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d in 292 msec 2024-12-11T20:46:53,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
2024-12-11T20:46:53,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-11T20:46:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-11T20:46:53,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:53,674 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:53,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-11T20:46:53,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc in 323 msec 2024-12-11T20:46:53,696 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:46:53,712 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:46:53,724 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:46:53,724 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,728 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741848_1024 (size=597) 2024-12-11T20:46:53,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741848_1024 (size=597) 2024-12-11T20:46:53,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741848_1024 (size=597) 2024-12-11T20:46:53,848 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:46:53,894 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:46:53,895 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:46:53,901 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T20:46:53,914 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 620 msec 2024-12-11T20:46:53,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T20:46:53,916 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:46:53,945 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='0290afc6e446e1db49590cef68c2ab113', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:46:53,949 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='167fa2c62a38f6111ae665147e6400de1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:46:53,953 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='20f926347fd13377d7d8c3d9b3eb3628c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d., 
hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:46:53,957 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='34b0e234f6c464efd2e21a8072a7cee5c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:46:53,969 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:53,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T20:46:53,984 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:53,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:46:53,988 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:46:54,007 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:46:54,019 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:54,020 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
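Editor's note: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above are what HRegion emits when mutations arrive with write-ahead logging skipped. One common way to produce such writes from the client side is a Put with Durability.SKIP_WAL, sketched below; the row key is one of the rows located in the log, while the qualifier, value, and class name are made-up placeholders, not the test's actual data.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
      Put put = new Put(Bytes.toBytes("0290afc6e446e1db49590cef68c2ab113"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL triggers the "with WAL disabled" warning seen in the region server log;
      // the write is faster but is lost if the server crashes before the memstore flushes.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}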
2024-12-11T20:46:54,021 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:46:54,031 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:46:54,072 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:46:54,096 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:46:54,104 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:46:54,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950014104 (current time:1733950014104). 2024-12-11T20:46:54,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:46:54,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T20:46:54,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:46:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e78bf11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:46:54,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:46:54,109 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:46:54,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:46:54,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:46:54,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@549e3c4a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:46:54,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:46:54,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,113 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:46:54,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e513b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:54,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:54,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:54,123 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49390, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:54,126 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:46:54,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:46:54,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,128 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:46:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dafab33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:46:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:46:54,137 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:46:54,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:46:54,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:46:54,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@420415a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:46:54,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:46:54,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,144 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:46:54,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb34dbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:46:54,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:46:54,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:46:54,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:54,160 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:54,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:46:54,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:46:54,170 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:46:54,172 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:46:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:46:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:46:54,173 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:46:54,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T20:46:54,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T20:46:54,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:46:54,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T20:46:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T20:46:54,190 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:46:54,194 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:46:54,201 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:46:54,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T20:46:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741849_1025 (size=210) 2024-12-11T20:46:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741849_1025 (size=210) 2024-12-11T20:46:54,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741849_1025 (size=210) 2024-12-11T20:46:54,313 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:46:54,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d}] 2024-12-11T20:46:54,320 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:54,320 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:54,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-11T20:46:54,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-11T20:46:54,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:46:54,491 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:46:54,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing d4b8a9e23f55a7aec4663a69038062bc 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T20:46:54,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 870517e9f0b0a069255c252b508eb39d 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T20:46:54,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T20:46:54,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/.tmp/cf/fbca09893f1743398c862a436d02d131 is 71, key is 13140fd7b9d0c34fc6b9ef8de6b84b21/cf:q/1733950013986/Put/seqid=0 2024-12-11T20:46:54,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/.tmp/cf/127a0ac6373b49d0bf865e78d86cca93 is 71, key is 06992674784dbb2a3cf96d1bbe177200/cf:q/1733950013987/Put/seqid=0 2024-12-11T20:46:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741851_1027 (size=5288) 2024-12-11T20:46:54,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741851_1027 (size=5288) 2024-12-11T20:46:54,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741851_1027 (size=5288) 2024-12-11T20:46:54,706 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/.tmp/cf/127a0ac6373b49d0bf865e78d86cca93 2024-12-11T20:46:54,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741850_1026 (size=8324) 2024-12-11T20:46:54,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741850_1026 (size=8324) 2024-12-11T20:46:54,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741850_1026 (size=8324) 2024-12-11T20:46:54,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/.tmp/cf/fbca09893f1743398c862a436d02d131 2024-12-11T20:46:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T20:46:54,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/.tmp/cf/127a0ac6373b49d0bf865e78d86cca93 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93 2024-12-11T20:46:54,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/.tmp/cf/fbca09893f1743398c862a436d02d131 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131 2024-12-11T20:46:54,910 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93, entries=3, sequenceid=6, filesize=5.2 K 2024-12-11T20:46:54,912 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131, entries=47, sequenceid=6, filesize=8.1 K 2024-12-11T20:46:54,927 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d4b8a9e23f55a7aec4663a69038062bc in 428ms, sequenceid=6, compaction requested=false 2024-12-11T20:46:54,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-11T20:46:54,929 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 870517e9f0b0a069255c252b508eb39d in 438ms, sequenceid=6, compaction requested=false 2024-12-11T20:46:54,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-11T20:46:54,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for d4b8a9e23f55a7aec4663a69038062bc: 2024-12-11T20:46:54,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T20:46:54,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:54,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:46:54,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93] hfiles 2024-12-11T20:46:54,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 870517e9f0b0a069255c252b508eb39d: 2024-12-11T20:46:54,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-11T20:46:54,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:54,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:46:54,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131] hfiles 2024-12-11T20:46:54,935 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:54,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741852_1028 (size=125) 2024-12-11T20:46:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741852_1028 (size=125) 2024-12-11T20:46:55,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741852_1028 (size=125) 2024-12-11T20:46:55,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
2024-12-11T20:46:55,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-11T20:46:55,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-11T20:46:55,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:55,026 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:46:55,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d4b8a9e23f55a7aec4663a69038062bc in 718 msec 2024-12-11T20:46:55,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741853_1029 (size=125) 2024-12-11T20:46:55,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741853_1029 (size=125) 2024-12-11T20:46:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741853_1029 (size=125) 2024-12-11T20:46:55,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 
2024-12-11T20:46:55,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-11T20:46:55,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-11T20:46:55,062 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:55,063 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:46:55,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-12-11T20:46:55,071 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:46:55,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 870517e9f0b0a069255c252b508eb39d in 752 msec 2024-12-11T20:46:55,073 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:46:55,074 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:46:55,075 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:55,076 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741854_1030 (size=675) 2024-12-11T20:46:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741854_1030 (size=675) 2024-12-11T20:46:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741854_1030 (size=675) 2024-12-11T20:46:55,199 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:46:55,280 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:46:55,286 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:55,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:46:55,296 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T20:46:55,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.1200 sec 2024-12-11T20:46:55,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T20:46:55,344 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:46:55,385 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:55,393 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:55,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:55,395 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:46:55,396 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T20:46:55,397 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:55,399 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T20:46:55,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:46:55,403 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T20:46:55,410 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:46:55,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:46:55,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:46:55,424 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:55,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-11T20:46:55,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T20:46:55,431 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:46:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741855_1031 (size=390) 2024-12-11T20:46:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741855_1031 (size=390) 2024-12-11T20:46:55,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741855_1031 (size=390) 2024-12-11T20:46:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T20:46:55,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T20:46:55,915 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
fef909894bb25880053931bc84e6d928, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:46:55,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741856_1032 (size=75) 2024-12-11T20:46:55,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741856_1032 (size=75) 2024-12-11T20:46:55,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741856_1032 (size=75) 2024-12-11T20:46:55,976 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:55,976 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing fef909894bb25880053931bc84e6d928, disabling compactions & flushes 2024-12-11T20:46:55,977 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:46:55,977 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:46:55,977 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. after waiting 0 ms 2024-12-11T20:46:55,977 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:46:55,977 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 
2024-12-11T20:46:55,977 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for fef909894bb25880053931bc84e6d928: Waiting for close lock at 1733950015976Disabling compacts and flushes for region at 1733950015976Disabling writes for close at 1733950015977 (+1 ms)Writing region close event to WAL at 1733950015977Closed at 1733950015977 2024-12-11T20:46:55,980 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:46:55,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733950015980"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950015980"}]},"ts":"1733950015980"} 2024-12-11T20:46:55,992 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T20:46:55,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:46:55,994 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950015994"}]},"ts":"1733950015994"} 2024-12-11T20:46:56,000 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T20:46:56,000 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:46:56,002 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:46:56,002 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:46:56,002 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:46:56,002 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:46:56,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, ASSIGN}] 2024-12-11T20:46:56,007 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, ASSIGN 2024-12-11T20:46:56,010 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:46:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T20:46:56,162 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T20:46:56,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=fef909894bb25880053931bc84e6d928, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:46:56,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, ASSIGN because future has completed 2024-12-11T20:46:56,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:46:56,339 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:46:56,339 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => fef909894bb25880053931bc84e6d928, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:46:56,340 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. service=AccessControlService 2024-12-11T20:46:56,340 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:46:56,340 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,340 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:46:56,341 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,341 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,348 INFO [StoreOpener-fef909894bb25880053931bc84e6d928-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,352 INFO [StoreOpener-fef909894bb25880053931bc84e6d928-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fef909894bb25880053931bc84e6d928 columnFamilyName cf 2024-12-11T20:46:56,353 DEBUG [StoreOpener-fef909894bb25880053931bc84e6d928-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:46:56,361 INFO [StoreOpener-fef909894bb25880053931bc84e6d928-1 {}] regionserver.HStore(327): Store=fef909894bb25880053931bc84e6d928/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:46:56,362 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,364 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,369 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,370 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,370 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,379 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,389 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:46:56,391 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened fef909894bb25880053931bc84e6d928; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65934512, jitterRate=-0.017499208450317383}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:46:56,391 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fef909894bb25880053931bc84e6d928 2024-12-11T20:46:56,392 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for fef909894bb25880053931bc84e6d928: Running coprocessor pre-open hook at 1733950016341Writing region info on filesystem at 1733950016341Initializing all the Stores at 1733950016344 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950016344Cleaning up temporary data from old regions at 1733950016370 (+26 ms)Running coprocessor post-open hooks at 1733950016391 (+21 ms)Region opened successfully at 1733950016392 (+1 ms) 2024-12-11T20:46:56,395 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., pid=20, masterSystemTime=1733950016326 2024-12-11T20:46:56,408 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:46:56,408 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 
2024-12-11T20:46:56,410 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=fef909894bb25880053931bc84e6d928, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:46:56,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:46:56,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-11T20:46:56,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820 in 258 msec 2024-12-11T20:46:56,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-11T20:46:56,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, ASSIGN in 441 msec 2024-12-11T20:46:56,467 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:46:56,468 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950016467"}]},"ts":"1733950016467"} 2024-12-11T20:46:56,478 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T20:46:56,494 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:46:56,494 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T20:46:56,503 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T20:46:56,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:56,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:56,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:56,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, 
quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:46:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,556 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,556 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,557 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,557 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,558 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,558 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:46:56,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 1.1430 sec 2024-12-11T20:46:56,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T20:46:56,564 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:46:56,565 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:46:56,568 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T20:46:58,388 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:46:58,531 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-11T20:47:01,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:47:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741857_1033 (size=134217728) 2024-12-11T20:47:01,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741857_1033 (size=134217728) 2024-12-11T20:47:01,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741857_1033 (size=134217728) 2024-12-11T20:47:03,982 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:03,982 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T20:47:05,490 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:35086 [Receiving block BP-464186549-172.17.0.2-1733949986605:blk_1073741858_1034] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1510ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/, blockId=1073741858, seqno=3129 2024-12-11T20:47:06,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741858_1034 (size=134217728) 2024-12-11T20:47:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741858_1034 (size=134217728) 2024-12-11T20:47:06,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741858_1034 (size=134217728) 2024-12-11T20:47:07,925 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733950016577/Put/seqid=0 2024-12-11T20:47:07,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741859_1035 (size=51979256) 2024-12-11T20:47:07,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741859_1035 (size=51979256) 2024-12-11T20:47:07,931 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741859_1035 (size=51979256) 2024-12-11T20:47:07,961 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3bc1f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:07,962 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:47:07,962 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:47:07,966 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:47:07,967 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:47:07,967 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:47:07,967 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a6e76e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:07,967 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:47:07,968 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:47:07,979 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:07,984 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:47:07,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fd5f8ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:07,989 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:47:07,992 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:47:07,994 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:08,006 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:47:08,061 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile 
hdfs://localhost:36717/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-11T20:47:08,062 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T20:47:08,065 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 5be53b084ac7,41779,1733949993169 2024-12-11T20:47:08,065 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3e996836 2024-12-11T20:47:08,066 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T20:47:08,069 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T20:47:08,084 WARN [IPC Server handler 2 on default port 36717 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T20:47:08,092 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:47:08,096 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:08,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:47:08,134 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:47:08,160 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:36717/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-11T20:47:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:47:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:47:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:08,195 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37959, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T20:47:08,197 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T20:47:08,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:37959 deadline: 1733950088195, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-11T20:47:08,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:47:08,215 WARN [IPC Server handler 2 on default port 36717 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T20:47:08,265 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:36717/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file for inclusion in fef909894bb25880053931bc84e6d928/cf 2024-12-11T20:47:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-11T20:47:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-11T20:47:08,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:36717/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-11T20:47:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(2603): Flush status journal for fef909894bb25880053931bc84e6d928: 2024-12-11T20:47:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:36717/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/output/cf/test_file to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/staging/jenkins__testExportFileSystemStateWithSplitRegion__59e3gapu0ktj671eho25q8jvi4qqik9nr2if6h0jubnvnmr69crcnlm25oofvj3v/cf/test_file 2024-12-11T20:47:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/staging/jenkins__testExportFileSystemStateWithSplitRegion__59e3gapu0ktj671eho25q8jvi4qqik9nr2if6h0jubnvnmr69crcnlm25oofvj3v/cf/test_file as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ 2024-12-11T20:47:08,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/staging/jenkins__testExportFileSystemStateWithSplitRegion__59e3gapu0ktj671eho25q8jvi4qqik9nr2if6h0jubnvnmr69crcnlm25oofvj3v/cf/test_file into fef909894bb25880053931bc84e6d928/cf as 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ - updating store file list. 2024-12-11T20:47:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T20:47:08,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ into fef909894bb25880053931bc84e6d928/cf 2024-12-11T20:47:08,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/staging/jenkins__testExportFileSystemStateWithSplitRegion__59e3gapu0ktj671eho25q8jvi4qqik9nr2if6h0jubnvnmr69crcnlm25oofvj3v/cf/test_file into fef909894bb25880053931bc84e6d928/cf (new location: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_) 2024-12-11T20:47:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/staging/jenkins__testExportFileSystemStateWithSplitRegion__59e3gapu0ktj671eho25q8jvi4qqik9nr2if6h0jubnvnmr69crcnlm25oofvj3v/cf/test_file 2024-12-11T20:47:08,403 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
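The entries above record the secure bulk load completing: the staged test_file HFile is validated against the region bounds, moved into the staging directory, and committed into fef909894bb25880053931bc84e6d928/cf as 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_, after which the client closes its connection. For reference, a bulk load of this kind is normally driven through the BulkLoadHFiles client API; the sketch below is illustrative only (the table name is taken from the log, the HFile directory path is a placeholder), not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        // Cluster configuration; in this test it would come from the mini-cluster utility.
        Configuration conf = HBaseConfiguration.create();
        // Directory laid out as <dir>/<family>/<hfile>, here output/cf/test_file (placeholder path).
        Path hfileDir = new Path("/path/to/output");
        // Ask the region servers to adopt the HFiles; this triggers the validation,
        // staging move and commit steps recorded in the log above.
        BulkLoadHFiles.create(conf)
            .bulkLoad(TableName.valueOf("testExportFileSystemStateWithSplitRegion"), hfileDir);
      }
    }
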
2024-12-11T20:47:08,403 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:47:08,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:08,403 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:47:08,405 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5be53b084ac7:40355 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T20:47:08,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:08,406 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T20:47:08,406 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2 from cache 2024-12-11T20:47:08,408 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:47:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 336 connection: 172.17.0.2:52738 deadline: 1733950088403 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:52738 2024-12-11T20:47:08,445 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:47:08,470 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 
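Here the client asks the master to split the bulk-loaded region; in the entries that follow, the master stores SplitTableRegionProcedure pid=21 with daughter regions b883908bc6626ac20c7479c3ccb08152 and 1c282182bbc4558ccfab0d62e02499bf and unassigns the parent. From the client side such a split is typically requested through the Admin API; the fragment below is a hypothetical sketch (the split point "5" is only inferred from the row located just above and is not confirmed by the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a split at an explicit split point; the master responds by creating
          // the SplitTableRegionProcedure and closing the parent region, as logged below.
          admin.split(TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
              Bytes.toBytes("5"));
        }
      }
    }
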
2024-12-11T20:47:08,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=5be53b084ac7,40355,1733949994820 2024-12-11T20:47:08,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=fef909894bb25880053931bc84e6d928, daughterA=b883908bc6626ac20c7479c3ccb08152, daughterB=1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:08,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=fef909894bb25880053931bc84e6d928, daughterA=b883908bc6626ac20c7479c3ccb08152, daughterB=1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:08,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=fef909894bb25880053931bc84e6d928, daughterA=b883908bc6626ac20c7479c3ccb08152, daughterB=1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:08,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=fef909894bb25880053931bc84e6d928, daughterA=b883908bc6626ac20c7479c3ccb08152, daughterB=1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:08,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, UNASSIGN}] 2024-12-11T20:47:08,513 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5be53b084ac7:40781 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?]
at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?]
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?]
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
... 34 more
2024-12-11T20:47:08,517 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, UNASSIGN
2024-12-11T20:47:08,521 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=fef909894bb25880053931bc84e6d928, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820
2024-12-11T20:47:08,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, UNASSIGN because future has completed
2024-12-11T20:47:08,534 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-11T20:47:08,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820}]
2024-12-11T20:47:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21
2024-12-11T20:47:08,732 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close fef909894bb25880053931bc84e6d928
2024-12-11T20:47:08,732 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-12-11T20:47:08,734 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing fef909894bb25880053931bc84e6d928, disabling compactions & flushes
2024-12-11T20:47:08,734 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.
2024-12-11T20:47:08,734 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.
2024-12-11T20:47:08,734 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. after waiting 0 ms 2024-12-11T20:47:08,734 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:47:08,799 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-11T20:47:08,804 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:47:08,804 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928. 2024-12-11T20:47:08,804 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for fef909894bb25880053931bc84e6d928: Waiting for close lock at 1733950028733Running coprocessor pre-close hooks at 1733950028733Disabling compacts and flushes for region at 1733950028733Disabling writes for close at 1733950028734 (+1 ms)Writing region close event to WAL at 1733950028759 (+25 ms)Running coprocessor post-close hooks at 1733950028801 (+42 ms)Closed at 1733950028804 (+3 ms) 2024-12-11T20:47:08,811 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed fef909894bb25880053931bc84e6d928 2024-12-11T20:47:08,827 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=fef909894bb25880053931bc84e6d928, regionState=CLOSED 2024-12-11T20:47:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:08,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:47:08,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-11T20:47:08,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure fef909894bb25880053931bc84e6d928, server=5be53b084ac7,40355,1733949994820 in 321 msec 2024-12-11T20:47:08,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-11T20:47:08,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=fef909894bb25880053931bc84e6d928, 
UNASSIGN in 359 msec 2024-12-11T20:47:08,910 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:08,916 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=fef909894bb25880053931bc84e6d928, threads=1 2024-12-11T20:47:08,929 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ for region: fef909894bb25880053931bc84e6d928 2024-12-11T20:47:08,960 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T20:47:09,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:10,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:12,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:13,545 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:60728 [Receiving block BP-464186549-172.17.0.2-1733949986605:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4453ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/, blockId=1073741830, seqno=192 2024-12-11T20:47:13,545 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:52670 [Receiving block BP-464186549-172.17.0.2-1733949986605:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4453ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/, blockId=1073741830, seqno=192 2024-12-11T20:47:13,546 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:54934 [Receiving block BP-464186549-172.17.0.2-1733949986605:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4454ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/, blockId=1073741830, seqno=192 2024-12-11T20:47:13,549 INFO [AsyncFSWAL-0-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169 {}] wal.AbstractFSWAL(1368): Slow sync cost: 4457 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46705,DS-f27da252-d217-4063-b080-6d805699a8f4,DISK], DatanodeInfoWithStorage[127.0.0.1:45301,DS-84276d66-e551-476e-a085-4f7c53a4846b,DISK], DatanodeInfoWithStorage[127.0.0.1:44295,DS-58496555-acc8-4e14-a2aa-6653727f2410,DISK]] 2024-12-11T20:47:13,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741860_1036 (size=21) 2024-12-11T20:47:13,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741860_1036 (size=21) 2024-12-11T20:47:13,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741860_1036 (size=21) 2024-12-11T20:47:13,579 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T20:47:13,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741861_1037 (size=21) 2024-12-11T20:47:13,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741861_1037 (size=21) 2024-12-11T20:47:13,616 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ for region: fef909894bb25880053931bc84e6d928 2024-12-11T20:47:13,619 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region fef909894bb25880053931bc84e6d928 Daughter A: [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928] storefiles, Daughter B: [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928] storefiles. 
2024-12-11T20:47:13,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741861_1037 (size=21) 2024-12-11T20:47:13,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741862_1038 (size=76) 2024-12-11T20:47:13,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741862_1038 (size=76) 2024-12-11T20:47:13,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741862_1038 (size=76) 2024-12-11T20:47:13,657 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:13,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741863_1039 (size=76) 2024-12-11T20:47:13,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741863_1039 (size=76) 2024-12-11T20:47:13,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741863_1039 (size=76) 2024-12-11T20:47:13,704 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:13,735 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T20:47:13,738 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T20:47:13,743 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733950033743"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733950033743"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733950033743"}]},"ts":"1733950033743"} 2024-12-11T20:47:13,744 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733950033743"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950033743"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733950033743"}]},"ts":"1733950033743"} 2024-12-11T20:47:13,744 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733950033743"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950033743"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733950033743"}]},"ts":"1733950033743"} 2024-12-11T20:47:13,769 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, ASSIGN}] 2024-12-11T20:47:13,771 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, ASSIGN 2024-12-11T20:47:13,771 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, ASSIGN 2024-12-11T20:47:13,773 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, ASSIGN; state=SPLITTING_NEW, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:47:13,774 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, ASSIGN; state=SPLITTING_NEW, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:47:13,923 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:47:13,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=1c282182bbc4558ccfab0d62e02499bf, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:47:13,925 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=b883908bc6626ac20c7479c3ccb08152, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:47:13,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, ASSIGN because future has completed 2024-12-11T20:47:13,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:47:13,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, ASSIGN because future has completed 2024-12-11T20:47:13,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:47:14,113 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:47:14,114 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 1c282182bbc4558ccfab0d62e02499bf, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.', STARTKEY => '5', ENDKEY => ''} 2024-12-11T20:47:14,114 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. service=AccessControlService 2024-12-11T20:47:14,114 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:47:14,115 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,115 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:47:14,115 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,115 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,128 INFO [StoreOpener-1c282182bbc4558ccfab0d62e02499bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,132 INFO [StoreOpener-1c282182bbc4558ccfab0d62e02499bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1c282182bbc4558ccfab0d62e02499bf columnFamilyName cf 2024-12-11T20:47:14,132 DEBUG [StoreOpener-1c282182bbc4558ccfab0d62e02499bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:14,188 DEBUG [StoreFileOpener-1c282182bbc4558ccfab0d62e02499bf-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928: NONE, but ROW specified in column family configuration 2024-12-11T20:47:14,228 DEBUG [StoreOpener-1c282182bbc4558ccfab0d62e02499bf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_-top 2024-12-11T20:47:14,229 INFO [StoreOpener-1c282182bbc4558ccfab0d62e02499bf-1 {}] regionserver.HStore(327): Store=1c282182bbc4558ccfab0d62e02499bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:47:14,231 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,235 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,238 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,238 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,239 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,263 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,268 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 1c282182bbc4558ccfab0d62e02499bf; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59624542, jitterRate=-0.11152508854866028}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:47:14,268 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:14,269 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 1c282182bbc4558ccfab0d62e02499bf: Running coprocessor pre-open hook at 1733950034115Writing region info on filesystem at 1733950034115Initializing all the Stores at 1733950034127 (+12 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950034127Cleaning up temporary data from old regions at 1733950034239 (+112 ms)Running coprocessor post-open hooks at 1733950034268 (+29 ms)Region opened successfully at 1733950034269 (+1 ms) 2024-12-11T20:47:14,279 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf., pid=26, masterSystemTime=1733950034099 2024-12-11T20:47:14,280 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.,because compaction is disabled. 2024-12-11T20:47:14,291 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:47:14,292 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:47:14,292 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:47:14,292 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => b883908bc6626ac20c7479c3ccb08152, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.', STARTKEY => '', ENDKEY => '5'} 2024-12-11T20:47:14,293 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. service=AccessControlService 2024-12-11T20:47:14,293 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:47:14,293 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,293 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=1c282182bbc4558ccfab0d62e02499bf, regionState=OPEN, openSeqNum=7, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:47:14,293 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:47:14,294 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,294 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:47:14,308 INFO [StoreOpener-b883908bc6626ac20c7479c3ccb08152-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-12-11T20:47:14,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, server=5be53b084ac7,40355,1733949994820 in 360 msec 2024-12-11T20:47:14,309 INFO [StoreOpener-b883908bc6626ac20c7479c3ccb08152-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b883908bc6626ac20c7479c3ccb08152 columnFamilyName cf 2024-12-11T20:47:14,309 DEBUG [StoreOpener-b883908bc6626ac20c7479c3ccb08152-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:14,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, ASSIGN in 540 msec 2024-12-11T20:47:14,357 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:47:14,369 DEBUG [StoreFileOpener-b883908bc6626ac20c7479c3ccb08152-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928: NONE, but ROW specified in column family configuration 2024-12-11T20:47:14,374 DEBUG [StoreOpener-b883908bc6626ac20c7479c3ccb08152-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_-bottom 2024-12-11T20:47:14,375 INFO [StoreOpener-b883908bc6626ac20c7479c3ccb08152-1 {}] regionserver.HStore(327): Store=b883908bc6626ac20c7479c3ccb08152/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:47:14,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,383 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,392 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,399 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,399 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,414 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,416 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened b883908bc6626ac20c7479c3ccb08152; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69005362, jitterRate=0.028260022401809692}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:47:14,416 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open 
hooks for b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:14,417 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for b883908bc6626ac20c7479c3ccb08152: Running coprocessor pre-open hook at 1733950034294Writing region info on filesystem at 1733950034294Initializing all the Stores at 1733950034303 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950034303Cleaning up temporary data from old regions at 1733950034399 (+96 ms)Running coprocessor post-open hooks at 1733950034416 (+17 ms)Region opened successfully at 1733950034416 2024-12-11T20:47:14,424 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152., pid=27, masterSystemTime=1733950034099 2024-12-11T20:47:14,424 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.,because compaction is disabled. 2024-12-11T20:47:14,428 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:47:14,428 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 
2024-12-11T20:47:14,433 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=b883908bc6626ac20c7479c3ccb08152, regionState=OPEN, openSeqNum=7, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:47:14,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:47:14,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-12-11T20:47:14,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820 in 493 msec 2024-12-11T20:47:14,471 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=21 2024-12-11T20:47:14,471 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, ASSIGN in 686 msec 2024-12-11T20:47:14,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=fef909894bb25880053931bc84e6d928, daughterA=b883908bc6626ac20c7479c3ccb08152, daughterB=1c282182bbc4558ccfab0d62e02499bf in 5.9880 sec 2024-12-11T20:47:16,537 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-11T20:47:16,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T20:47:16,703 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T20:47:16,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:47:16,708 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:47:16,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950036708 (current time:1733950036708). 
2024-12-11T20:47:16,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:47:16,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T20:47:16,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:47:16,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e79b080, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:47:16,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:47:16,711 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:47:16,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:47:16,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:47:16,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@443addef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:47:16,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:47:16,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,714 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:47:16,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfed81c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:47:16,717 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:47:16,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:16,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:47:16,724 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:47:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:47:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,726 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:47:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ceb2682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:47:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:47:16,728 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:47:16,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:47:16,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:47:16,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48c2f4d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:47:16,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:47:16,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,731 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:47:16,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c895bf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:47:16,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:47:16,733 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:47:16,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:16,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:47:16,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:47:16,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:47:16,740 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:47:16,742 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:47:16,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:47:16,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,742 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:47:16,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:47:16,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T20:47:16,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:47:16,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T20:47:16,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T20:47:16,750 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:47:16,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T20:47:16,751 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:47:16,755 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:47:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741864_1040 (size=197) 2024-12-11T20:47:16,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741864_1040 (size=197) 2024-12-11T20:47:16,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741864_1040 (size=197) 2024-12-11T20:47:16,776 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:47:16,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b883908bc6626ac20c7479c3ccb08152}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1c282182bbc4558ccfab0d62e02499bf}] 2024-12-11T20:47:16,779 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:16,779 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:16,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T20:47:16,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-11T20:47:16,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 1c282182bbc4558ccfab0d62e02499bf: 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for b883908bc6626ac20c7479c3ccb08152: 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
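The procedure trace above is the master-side execution of a client-requested snapshot: pid=28 walks SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS while the per-region SnapshotRegionProcedure children (pid=29, pid=30) flush and reference the two daughter regions of the split table. A minimal client-side counterpart is sketched below; the class name and connection boilerplate are illustrative and not taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to snapshot the table; the master then runs the
          // SnapshotProcedure state machine seen above on the caller's behalf.
          admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
        }
      }
    }

For an enabled table this two-argument call should produce the FLUSH-type snapshot recorded in the log.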
2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:47:16,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:47:16,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_-bottom] hfiles 2024-12-11T20:47:16,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_-top] hfiles 2024-12-11T20:47:16,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:16,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741865_1041 
(size=182) 2024-12-11T20:47:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741865_1041 (size=182) 2024-12-11T20:47:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741865_1041 (size=182) 2024-12-11T20:47:17,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741866_1042 (size=182) 2024-12-11T20:47:17,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741866_1042 (size=182) 2024-12-11T20:47:17,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741866_1042 (size=182) 2024-12-11T20:47:17,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:47:17,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-11T20:47:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-11T20:47:17,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:17,016 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:47:17,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b883908bc6626ac20c7479c3ccb08152 in 242 msec 2024-12-11T20:47:17,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T20:47:17,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T20:47:17,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 
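The recurring "Checking to see if procedure is done pid=28" lines are the master answering the requesting client's polling while its blocking snapshot call waits for pid=28 to finish. Once the call returns, a caller can confirm the snapshot is registered; the helper below is a sketch (class and method names are invented for illustration) that relies only on Admin.listSnapshots().

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    final class SnapshotChecks {
      // True if the named snapshot appears in the master's list of completed snapshots.
      static boolean isRegistered(Admin admin, String name) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          if (name.equals(sd.getName())) {
            return true;
          }
        }
        return false;
      }
    }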
2024-12-11T20:47:17,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-11T20:47:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-11T20:47:17,394 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:17,395 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:47:17,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-11T20:47:17,408 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:47:17,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1c282182bbc4558ccfab0d62e02499bf in 622 msec 2024-12-11T20:47:17,412 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T20:47:17,412 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T20:47:17,412 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:47:17,415 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_] hfiles 2024-12-11T20:47:17,415 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ 2024-12-11T20:47:17,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741867_1043 (size=129) 2024-12-11T20:47:17,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741867_1043 (size=129) 2024-12-11T20:47:17,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741867_1043 (size=129) 2024-12-11T20:47:17,480 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => fef909894bb25880053931bc84e6d928, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.', STARTKEY => '', ENDKEY => 
'', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:17,483 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:47:17,485 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:47:17,485 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:17,486 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:17,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741868_1044 (size=891) 2024-12-11T20:47:17,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741868_1044 (size=891) 2024-12-11T20:47:17,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741868_1044 (size=891) 2024-12-11T20:47:17,603 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:47:17,655 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:47:17,667 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:17,680 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:47:17,680 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion 
table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T20:47:17,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 936 msec 2024-12-11T20:47:17,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T20:47:17,883 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:47:17,887 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886 2024-12-11T20:47:17,887 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:47:17,977 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:47:17,977 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:17,988 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
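From this point the test drives org.apache.hadoop.hbase.snapshot.ExportSnapshot, which verifies the source snapshot, copies its manifest, and then runs a MapReduce job to copy the referenced hfiles. The sketch below shows one way to invoke the same tool programmatically; it assumes ExportSnapshot's standard -snapshot and -copy-to options and uses a placeholder destination URI rather than the test's per-run export directory.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and hfiles to another filesystem root;
        // the destination below is a placeholder, not the path from this log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://namenode:8020/exported-snapshots"
        });
        System.exit(rc);
      }
    }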
2024-12-11T20:47:18,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:47:18,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741869_1045 (size=197) 2024-12-11T20:47:18,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741869_1045 (size=197) 2024-12-11T20:47:18,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741869_1045 (size=197) 2024-12-11T20:47:18,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741870_1046 (size=891) 2024-12-11T20:47:18,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741870_1046 (size=891) 2024-12-11T20:47:18,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741870_1046 (size=891) 2024-12-11T20:47:18,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:18,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:18,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-17458894714101658295.jar 2024-12-11T20:47:19,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-2863736878834211832.jar 
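The long run of "For class X, using jar Y" entries here and below is TableMapReduceUtil resolving, for each class the export job needs, the jar that provides it, so those jars can be shipped with the MapReduce job. A hand-built job would trigger the same resolution with something like the following sketch (class and job names are illustrative).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JarShipping {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-demo");
        // Resolves the jars backing HBase and the job's configured classes and
        // adds them to the job's tmpjars, producing debug lines like the
        // "For class ..., using jar ..." entries in this log.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }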
2024-12-11T20:47:19,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:47:19,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:47:19,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:47:19,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:47:19,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:47:19,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:47:19,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:47:19,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:47:19,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:47:19,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:47:19,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:47:19,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:47:19,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:47:19,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:47:19,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:47:19,526 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:47:19,526 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:47:19,526 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:47:19,527 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:47:19,618 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:47:19,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741871_1047 (size=131440) 2024-12-11T20:47:19,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741871_1047 (size=131440) 2024-12-11T20:47:19,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741871_1047 (size=131440) 2024-12-11T20:47:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741872_1048 (size=4188619) 2024-12-11T20:47:19,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741872_1048 (size=4188619) 2024-12-11T20:47:19,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741872_1048 (size=4188619) 2024-12-11T20:47:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741873_1049 (size=1323991) 2024-12-11T20:47:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741873_1049 (size=1323991) 2024-12-11T20:47:19,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741873_1049 (size=1323991) 2024-12-11T20:47:19,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741874_1050 (size=903544) 2024-12-11T20:47:19,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741874_1050 (size=903544) 2024-12-11T20:47:19,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741874_1050 (size=903544) 2024-12-11T20:47:20,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741875_1051 (size=8360360) 2024-12-11T20:47:20,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741875_1051 (size=8360360) 2024-12-11T20:47:20,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741875_1051 (size=8360360) 2024-12-11T20:47:20,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741876_1052 (size=443172) 2024-12-11T20:47:20,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741876_1052 (size=443172) 2024-12-11T20:47:20,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741876_1052 (size=443172) 2024-12-11T20:47:20,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741877_1053 (size=1877034) 2024-12-11T20:47:20,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741877_1053 (size=1877034) 2024-12-11T20:47:20,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741877_1053 (size=1877034) 2024-12-11T20:47:20,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741878_1054 (size=77835) 2024-12-11T20:47:20,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741878_1054 (size=77835) 2024-12-11T20:47:20,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741878_1054 (size=77835) 2024-12-11T20:47:20,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741879_1055 (size=30949) 2024-12-11T20:47:20,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741879_1055 (size=30949) 2024-12-11T20:47:20,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741879_1055 (size=30949) 2024-12-11T20:47:20,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741880_1056 (size=1597213) 2024-12-11T20:47:20,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741880_1056 (size=1597213) 2024-12-11T20:47:20,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741880_1056 (size=1597213) 2024-12-11T20:47:20,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741881_1057 (size=4695811) 2024-12-11T20:47:20,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741881_1057 (size=4695811) 2024-12-11T20:47:20,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741881_1057 (size=4695811) 2024-12-11T20:47:20,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741882_1058 (size=232957) 2024-12-11T20:47:20,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741882_1058 (size=232957) 2024-12-11T20:47:20,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741882_1058 (size=232957) 2024-12-11T20:47:20,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741883_1059 (size=127628) 2024-12-11T20:47:20,778 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741883_1059 (size=127628) 2024-12-11T20:47:20,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741883_1059 (size=127628) 2024-12-11T20:47:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741884_1060 (size=20406) 2024-12-11T20:47:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741884_1060 (size=20406) 2024-12-11T20:47:20,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741884_1060 (size=20406) 2024-12-11T20:47:20,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741885_1061 (size=5175431) 2024-12-11T20:47:20,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741885_1061 (size=5175431) 2024-12-11T20:47:20,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741885_1061 (size=5175431) 2024-12-11T20:47:20,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741886_1062 (size=217634) 2024-12-11T20:47:20,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741886_1062 (size=217634) 2024-12-11T20:47:20,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741886_1062 (size=217634) 2024-12-11T20:47:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741887_1063 (size=1832290) 2024-12-11T20:47:20,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741887_1063 (size=1832290) 2024-12-11T20:47:20,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741887_1063 (size=1832290) 2024-12-11T20:47:20,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741888_1064 (size=322274) 2024-12-11T20:47:20,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741888_1064 (size=322274) 2024-12-11T20:47:20,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741888_1064 (size=322274) 2024-12-11T20:47:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741889_1065 (size=6426228) 2024-12-11T20:47:20,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741889_1065 (size=6426228) 2024-12-11T20:47:20,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741889_1065 (size=6426228) 2024-12-11T20:47:20,940 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741890_1066 (size=503880) 2024-12-11T20:47:20,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741890_1066 (size=503880) 2024-12-11T20:47:20,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741890_1066 (size=503880) 2024-12-11T20:47:20,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741891_1067 (size=29229) 2024-12-11T20:47:20,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741891_1067 (size=29229) 2024-12-11T20:47:20,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741891_1067 (size=29229) 2024-12-11T20:47:20,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741892_1068 (size=24096) 2024-12-11T20:47:20,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741892_1068 (size=24096) 2024-12-11T20:47:20,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741892_1068 (size=24096) 2024-12-11T20:47:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741893_1069 (size=111872) 2024-12-11T20:47:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741893_1069 (size=111872) 2024-12-11T20:47:20,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741893_1069 (size=111872) 2024-12-11T20:47:21,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741894_1070 (size=45609) 2024-12-11T20:47:21,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741894_1070 (size=45609) 2024-12-11T20:47:21,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741894_1070 (size=45609) 2024-12-11T20:47:21,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741895_1071 (size=136454) 2024-12-11T20:47:21,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741895_1071 (size=136454) 2024-12-11T20:47:21,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741895_1071 (size=136454) 2024-12-11T20:47:21,037 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
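The JobResourceUploader warning immediately above fires because the submitted job never set a job jar; since the dependency jars were shipped as tmpjars, the warning is likely harmless in this test. In a hand-written driver the usual fix is to point the job at the jar containing the driver class, as in the sketch below (the class name is a stand-in).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class DriverWithJar {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "demo-job");
        // Records the jar that contains this class as the job jar, which is
        // exactly what the "No job jar file set" warning asks for.
        job.setJarByClass(DriverWithJar.class);
      }
    }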
2024-12-11T20:47:21,042 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-11T20:47:21,049 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=fef909894bb25880053931bc84e6d928-7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_. 2024-12-11T20:47:21,050 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=fef909894bb25880053931bc84e6d928-7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_. 2024-12-11T20:47:21,050 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-11T20:47:21,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741896_1072 (size=244) 2024-12-11T20:47:21,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741896_1072 (size=244) 2024-12-11T20:47:21,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741896_1072 (size=244) 2024-12-11T20:47:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741897_1073 (size=17) 2024-12-11T20:47:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741897_1073 (size=17) 2024-12-11T20:47:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741897_1073 (size=17) 2024-12-11T20:47:21,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741898_1074 (size=304056) 2024-12-11T20:47:21,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741898_1074 (size=304056) 2024-12-11T20:47:21,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741898_1074 (size=304056) 2024-12-11T20:47:21,684 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:47:21,685 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:47:22,061 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0001_000001 (auth:SIMPLE) from 127.0.0.1:51546 2024-12-11T20:47:23,744 INFO [master/5be53b084ac7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-11T20:47:23,745 INFO [master/5be53b084ac7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
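The two AbstractLeafQueue warnings above come from the MiniMRCluster's capacity scheduler: the ApplicationMaster resource share is below what a single AM needs, so the scheduler notes it and skips enforcement. On a real queue the relevant knob is the capacity-scheduler limit shown below; whether this particular mini cluster would read it from a plain Configuration is an assumption, so treat this only as a pointer to the setting.

    import org.apache.hadoop.conf.Configuration;

    public class AmShareTuning {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow ApplicationMasters to use up to 100% of queue resources, which
        // avoids the "maximum-am-resource-percent is insufficient" warning on
        // very small test clusters.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 1.0f);
      }
    }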
2024-12-11T20:47:31,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:47:32,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0001_000001 (auth:SIMPLE) from 127.0.0.1:58524 2024-12-11T20:47:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741899_1075 (size=349754) 2024-12-11T20:47:32,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741899_1075 (size=349754) 2024-12-11T20:47:32,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741899_1075 (size=349754) 2024-12-11T20:47:34,899 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0001_000001 (auth:SIMPLE) from 127.0.0.1:53356 2024-12-11T20:47:37,712 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 870517e9f0b0a069255c252b508eb39d, had cached 0 bytes from a total of 8324 2024-12-11T20:47:37,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d4b8a9e23f55a7aec4663a69038062bc, had cached 0 bytes from a total of 5288 2024-12-11T20:47:41,997 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 92982b2478254451b467ff12e9fe4706 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:47:42,003 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d4b8a9e23f55a7aec4663a69038062bc changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:47:42,005 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 870517e9f0b0a069255c252b508eb39d changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:47:59,115 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1c282182bbc4558ccfab0d62e02499bf, had cached 0 bytes from a total of 320414712 2024-12-11T20:47:59,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b883908bc6626ac20c7479c3ccb08152, had cached 0 bytes from a total of 320414712 2024-12-11T20:48:01,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T20:48:17,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741900_1076 (size=134217728) 2024-12-11T20:48:17,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741900_1076 (size=134217728) 2024-12-11T20:48:17,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741900_1076 (size=134217728) 2024-12-11T20:48:22,713 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 870517e9f0b0a069255c252b508eb39d, had cached 0 bytes from a total of 8324 2024-12-11T20:48:22,723 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d4b8a9e23f55a7aec4663a69038062bc, had cached 0 bytes from a total of 5288 2024-12-11T20:48:31,779 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:48:37,673 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa15ea1f18a797a27 with lease ID 0x6fd6a20e2ad20c4e: from storage DS-58496555-acc8-4e14-a2aa-6653727f2410 node DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 35, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T20:48:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa15ea1f18a797a27 with lease ID 0x6fd6a20e2ad20c4e: from storage DS-ef1e522f-f697-42de-a997-46c88fcecc73 node DatanodeRegistration(127.0.0.1:44295, datanodeUuid=6bce5cc2-a500-49e3-aebf-6157469ea69c, infoPort=46505, infoSecurePort=0, ipcPort=38091, storageInfo=lv=-57;cid=testClusterID;nsid=1257551669;c=1733949986605), blocks: 35, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T20:48:44,115 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1c282182bbc4558ccfab0d62e02499bf, had cached 0 bytes from a total of 320414712 2024-12-11T20:48:44,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b883908bc6626ac20c7479c3ccb08152, had cached 0 bytes from a total of 320414712 2024-12-11T20:48:56,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741901_1077 (size=134217728) 2024-12-11T20:48:56,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741901_1077 (size=134217728) 2024-12-11T20:48:56,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741901_1077 (size=134217728) 2024-12-11T20:49:01,779 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
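The sizes in the surrounding block reports line up with the export split reported earlier: the hfile being copied is 320,414,712 bytes (the "total of 320414712" in the region metrics, i.e. the 305.6 M split), which lands in HDFS as two full 134,217,728-byte blocks plus the 51,979,256-byte tail block reported a little further below. A quick check of that arithmetic:

    public class ExportSizeCheck {
      public static void main(String[] args) {
        long fullBlock = 134_217_728L;  // default 128 MiB HDFS block size
        long tailBlock = 51_979_256L;   // final partial block of the copied hfile
        long total = 2 * fullBlock + tailBlock;
        // Prints 320414712 bytes (~305.6 MiB), matching the region metrics and
        // the "export split=0 size=305.6 M" entry earlier in the log.
        System.out.printf("%d bytes = %.1f MiB%n", total, total / (1024.0 * 1024.0));
      }
    }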
2024-12-11T20:49:07,713 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 870517e9f0b0a069255c252b508eb39d, had cached 0 bytes from a total of 8324 2024-12-11T20:49:07,724 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d4b8a9e23f55a7aec4663a69038062bc, had cached 0 bytes from a total of 5288 2024-12-11T20:49:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741902_1078 (size=51979256) 2024-12-11T20:49:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741902_1078 (size=51979256) 2024-12-11T20:49:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741902_1078 (size=51979256) 2024-12-11T20:49:10,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741903_1079 (size=17509) 2024-12-11T20:49:10,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741903_1079 (size=17509) 2024-12-11T20:49:10,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741903_1079 (size=17509) 2024-12-11T20:49:10,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741904_1080 (size=482) 2024-12-11T20:49:10,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741904_1080 (size=482) 2024-12-11T20:49:10,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741904_1080 (size=482) 2024-12-11T20:49:10,678 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000002/launch_container.sh] 2024-12-11T20:49:10,678 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000002/container_tokens] 2024-12-11T20:49:10,678 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000002/sysfs] 2024-12-11T20:49:10,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741905_1081 (size=17509) 2024-12-11T20:49:10,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46705 is added to blk_1073741905_1081 (size=17509) 2024-12-11T20:49:10,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741905_1081 (size=17509) 2024-12-11T20:49:10,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741906_1082 (size=349754) 2024-12-11T20:49:10,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741906_1082 (size=349754) 2024-12-11T20:49:10,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741906_1082 (size=349754) 2024-12-11T20:49:10,768 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0001_000001 (auth:SIMPLE) from 127.0.0.1:34524 2024-12-11T20:49:12,384 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:49:12,385 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:49:12,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,394 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:49:12,395 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:49:12,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,396 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T20:49:12,396 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T20:49:12,396 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,396 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T20:49:12,396 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950037886/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T20:49:12,432 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950152469"}]},"ts":"1733950152469"} 2024-12-11T20:49:12,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T20:49:12,473 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-11T20:49:12,473 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T20:49:12,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-11T20:49:12,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, UNASSIGN}] 2024-12-11T20:49:12,485 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, UNASSIGN 2024-12-11T20:49:12,486 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, UNASSIGN 2024-12-11T20:49:12,488 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=b883908bc6626ac20c7479c3ccb08152, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:12,488 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1c282182bbc4558ccfab0d62e02499bf, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:12,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, 
UNASSIGN because future has completed 2024-12-11T20:49:12,491 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:12,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:12,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, UNASSIGN because future has completed 2024-12-11T20:49:12,493 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:12,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:12,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T20:49:12,646 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:49:12,646 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:12,646 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing b883908bc6626ac20c7479c3ccb08152, disabling compactions & flushes 2024-12-11T20:49:12,646 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:49:12,646 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:49:12,646 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. after waiting 0 ms 2024-12-11T20:49:12,646 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 
2024-12-11T20:49:12,652 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T20:49:12,653 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:12,653 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152. 2024-12-11T20:49:12,653 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for b883908bc6626ac20c7479c3ccb08152: Waiting for close lock at 1733950152646Running coprocessor pre-close hooks at 1733950152646Disabling compacts and flushes for region at 1733950152646Disabling writes for close at 1733950152646Writing region close event to WAL at 1733950152647 (+1 ms)Running coprocessor post-close hooks at 1733950152653 (+6 ms)Closed at 1733950152653 2024-12-11T20:49:12,655 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:49:12,655 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:49:12,655 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:12,655 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 1c282182bbc4558ccfab0d62e02499bf, disabling compactions & flushes 2024-12-11T20:49:12,655 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:49:12,655 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:49:12,656 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. after waiting 0 ms 2024-12-11T20:49:12,656 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 
2024-12-11T20:49:12,656 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=b883908bc6626ac20c7479c3ccb08152, regionState=CLOSED 2024-12-11T20:49:12,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:12,660 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T20:49:12,661 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:12,661 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf. 2024-12-11T20:49:12,662 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 1c282182bbc4558ccfab0d62e02499bf: Waiting for close lock at 1733950152655Running coprocessor pre-close hooks at 1733950152655Disabling compacts and flushes for region at 1733950152655Disabling writes for close at 1733950152656 (+1 ms)Writing region close event to WAL at 1733950152656Running coprocessor post-close hooks at 1733950152661 (+5 ms)Closed at 1733950152661 2024-12-11T20:49:12,663 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:49:12,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-11T20:49:12,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure b883908bc6626ac20c7479c3ccb08152, server=5be53b084ac7,40355,1733949994820 in 170 msec 2024-12-11T20:49:12,664 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1c282182bbc4558ccfab0d62e02499bf, regionState=CLOSED 2024-12-11T20:49:12,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b883908bc6626ac20c7479c3ccb08152, UNASSIGN in 181 msec 2024-12-11T20:49:12,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:12,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-11T20:49:12,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 1c282182bbc4558ccfab0d62e02499bf, 
server=5be53b084ac7,40355,1733949994820 in 174 msec 2024-12-11T20:49:12,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-11T20:49:12,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1c282182bbc4558ccfab0d62e02499bf, UNASSIGN in 187 msec 2024-12-11T20:49:12,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-11T20:49:12,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 198 msec 2024-12-11T20:49:12,678 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950152678"}]},"ts":"1733950152678"} 2024-12-11T20:49:12,681 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T20:49:12,681 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T20:49:12,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 248 msec 2024-12-11T20:49:12,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T20:49:12,783 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:49:12,787 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,842 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,845 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,847 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57633, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:12,854 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35533, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-11T20:49:12,856 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:12,858 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928 2024-12-11T20:49:12,858 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:49:12,858 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:49:12,863 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/recovered.edits] 2024-12-11T20:49:12,863 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/recovered.edits] 2024-12-11T20:49:12,863 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/recovered.edits] 2024-12-11T20:49:13,084 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 2024-12-11T20:49:13,092 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 to 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_.fef909894bb25880053931bc84e6d928 2024-12-11T20:49:13,093 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/recovered.edits/10.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152/recovered.edits/10.seqid 2024-12-11T20:49:13,093 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/cf/7b75518489804af3ba8f3bae5c04b0e8_SeqId_4_ 2024-12-11T20:49:13,093 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/b883908bc6626ac20c7479c3ccb08152 2024-12-11T20:49:13,121 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/recovered.edits/10.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf/recovered.edits/10.seqid 2024-12-11T20:49:13,122 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/recovered.edits/6.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928/recovered.edits/6.seqid 2024-12-11T20:49:13,123 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/1c282182bbc4558ccfab0d62e02499bf 2024-12-11T20:49:13,123 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportFileSystemStateWithSplitRegion/fef909894bb25880053931bc84e6d928 2024-12-11T20:49:13,123 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-11T20:49:13,127 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,132 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-11T20:49:13,139 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T20:49:13,144 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T20:49:13,146 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,147 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-11T20:49:13,147 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950153147"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,147 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950153147"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,147 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950153147"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,152 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-11T20:49:13,152 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => fef909894bb25880053931bc84e6d928, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950015409.fef909894bb25880053931bc84e6d928.', STARTKEY => '', ENDKEY => ''}, {ENCODED => b883908bc6626ac20c7479c3ccb08152, NAME => 'testExportFileSystemStateWithSplitRegion,,1733950028481.b883908bc6626ac20c7479c3ccb08152.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 1c282182bbc4558ccfab0d62e02499bf, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733950028481.1c282182bbc4558ccfab0d62e02499bf.', STARTKEY => '5', ENDKEY => ''}] 2024-12-11T20:49:13,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-11T20:49:13,153 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950153153"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,159 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-11T20:49:13,161 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, 
quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-11T20:49:13,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 374 msec 2024-12-11T20:49:13,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:13,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:13,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:13,166 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-11T20:49:13,274 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,274 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:49:13,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T20:49:13,279 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950153279"}]},"ts":"1733950153279"} 
2024-12-11T20:49:13,281 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-11T20:49:13,281 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T20:49:13,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-11T20:49:13,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, UNASSIGN}] 2024-12-11T20:49:13,286 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, UNASSIGN 2024-12-11T20:49:13,286 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, UNASSIGN 2024-12-11T20:49:13,287 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=870517e9f0b0a069255c252b508eb39d, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:49:13,287 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=d4b8a9e23f55a7aec4663a69038062bc, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:13,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, UNASSIGN because future has completed 2024-12-11T20:49:13,293 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:13,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:49:13,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, UNASSIGN because future has completed 2024-12-11T20:49:13,305 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:13,305 
INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:13,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T20:49:13,454 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52327, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:49:13,455 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:49:13,455 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:13,455 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 870517e9f0b0a069255c252b508eb39d, disabling compactions & flushes 2024-12-11T20:49:13,455 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:49:13,455 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:49:13,455 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. after waiting 0 ms 2024-12-11T20:49:13,455 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 2024-12-11T20:49:13,460 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:49:13,460 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:13,460 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d. 
2024-12-11T20:49:13,461 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 870517e9f0b0a069255c252b508eb39d: Waiting for close lock at 1733950153455Running coprocessor pre-close hooks at 1733950153455Disabling compacts and flushes for region at 1733950153455Disabling writes for close at 1733950153455Writing region close event to WAL at 1733950153456 (+1 ms)Running coprocessor post-close hooks at 1733950153460 (+4 ms)Closed at 1733950153460 2024-12-11T20:49:13,463 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 870517e9f0b0a069255c252b508eb39d 2024-12-11T20:49:13,463 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:49:13,463 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:13,463 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=870517e9f0b0a069255c252b508eb39d, regionState=CLOSED 2024-12-11T20:49:13,463 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing d4b8a9e23f55a7aec4663a69038062bc, disabling compactions & flushes 2024-12-11T20:49:13,463 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:49:13,463 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:49:13,463 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. after waiting 0 ms 2024-12-11T20:49:13,463 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 
2024-12-11T20:49:13,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:49:13,468 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:49:13,469 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:13,469 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc. 2024-12-11T20:49:13,470 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for d4b8a9e23f55a7aec4663a69038062bc: Waiting for close lock at 1733950153463Running coprocessor pre-close hooks at 1733950153463Disabling compacts and flushes for region at 1733950153463Disabling writes for close at 1733950153463Writing region close event to WAL at 1733950153465 (+2 ms)Running coprocessor post-close hooks at 1733950153469 (+4 ms)Closed at 1733950153469 2024-12-11T20:49:13,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-11T20:49:13,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 870517e9f0b0a069255c252b508eb39d, server=5be53b084ac7,40781,1733949995244 in 174 msec 2024-12-11T20:49:13,472 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:49:13,474 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=870517e9f0b0a069255c252b508eb39d, UNASSIGN in 186 msec 2024-12-11T20:49:13,474 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=d4b8a9e23f55a7aec4663a69038062bc, regionState=CLOSED 2024-12-11T20:49:13,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:13,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-11T20:49:13,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure d4b8a9e23f55a7aec4663a69038062bc, server=5be53b084ac7,40355,1733949994820 in 173 msec 2024-12-11T20:49:13,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing 
ppid=39 2024-12-11T20:49:13,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=d4b8a9e23f55a7aec4663a69038062bc, UNASSIGN in 198 msec 2024-12-11T20:49:13,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-11T20:49:13,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 205 msec 2024-12-11T20:49:13,496 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950153496"}]},"ts":"1733950153496"} 2024-12-11T20:49:13,499 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T20:49:13,499 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T20:49:13,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 225 msec 2024-12-11T20:49:13,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T20:49:13,593 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:49:13,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,596 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,597 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,600 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,602 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d 2024-12-11T20:49:13,602 
DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:49:13,605 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/recovered.edits] 2024-12-11T20:49:13,605 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/recovered.edits] 2024-12-11T20:49:13,612 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/cf/fbca09893f1743398c862a436d02d131 2024-12-11T20:49:13,612 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/cf/127a0ac6373b49d0bf865e78d86cca93 2024-12-11T20:49:13,620 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d/recovered.edits/9.seqid 2024-12-11T20:49:13,632 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc/recovered.edits/9.seqid 2024-12-11T20:49:13,632 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/870517e9f0b0a069255c252b508eb39d 2024-12-11T20:49:13,633 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSplitRegion/d4b8a9e23f55a7aec4663a69038062bc 2024-12-11T20:49:13,634 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-11T20:49:13,639 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,643 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T20:49:13,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T20:49:13,647 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,648 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-11T20:49:13,648 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950153648"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,648 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950153648"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,650 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T20:49:13,652 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:49:13,652 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d4b8a9e23f55a7aec4663a69038062bc, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733950012015.d4b8a9e23f55a7aec4663a69038062bc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 870517e9f0b0a069255c252b508eb39d, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733950012015.870517e9f0b0a069255c252b508eb39d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:49:13,652 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-11T20:49:13,652 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950153652"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:13,654 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-11T20:49:13,655 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 61 msec 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-11T20:49:13,668 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,668 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T20:49:13,718 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T20:49:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,725 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T20:49:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,731 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T20:49:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,767 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=764 (was 714) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:39366 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 108580) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1382 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-620804413_1 at /127.0.0.1:59266 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Parameter Sending Thread for localhost/127.0.0.1:35655 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-620804413_1 at /127.0.0.1:60536 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:35655 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:42202 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:44382 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=781 (was 763) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=664 (was 759), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=705 (was 4468) 2024-12-11T20:49:13,768 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-12-11T20:49:13,786 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=764, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=664, ProcessCount=17, AvailableMemoryMB=704 2024-12-11T20:49:13,786 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-12-11T20:49:13,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:49:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:13,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:49:13,792 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:13,792 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-11T20:49:13,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:49:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=45 2024-12-11T20:49:13,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741907_1083 (size=406) 2024-12-11T20:49:13,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741907_1083 (size=406) 2024-12-11T20:49:13,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741907_1083 (size=406) 2024-12-11T20:49:13,815 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d8d8acc9953a10e1b739b6c090a7f909, NAME => 'testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:13,815 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f7f4b125b358ba36e7f14b1aaa372126, NAME => 'testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:13,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741908_1084 (size=67) 2024-12-11T20:49:13,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741908_1084 (size=67) 2024-12-11T20:49:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741908_1084 (size=67) 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing d8d8acc9953a10e1b739b6c090a7f909, disabling compactions & flushes 2024-12-11T20:49:13,850 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. after waiting 0 ms 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:13,850 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:13,850 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for d8d8acc9953a10e1b739b6c090a7f909: Waiting for close lock at 1733950153850Disabling compacts and flushes for region at 1733950153850Disabling writes for close at 1733950153850Writing region close event to WAL at 1733950153850Closed at 1733950153850 2024-12-11T20:49:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741909_1085 (size=67) 2024-12-11T20:49:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741909_1085 (size=67) 2024-12-11T20:49:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741909_1085 (size=67) 2024-12-11T20:49:13,863 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:13,863 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing f7f4b125b358ba36e7f14b1aaa372126, disabling compactions & flushes 2024-12-11T20:49:13,863 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:13,863 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:13,863 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. after waiting 0 ms 2024-12-11T20:49:13,863 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 
2024-12-11T20:49:13,864 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:13,864 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for f7f4b125b358ba36e7f14b1aaa372126: Waiting for close lock at 1733950153863Disabling compacts and flushes for region at 1733950153863Disabling writes for close at 1733950153863Writing region close event to WAL at 1733950153864 (+1 ms)Closed at 1733950153864 2024-12-11T20:49:13,865 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:49:13,866 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733950153865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950153865"}]},"ts":"1733950153865"} 2024-12-11T20:49:13,866 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733950153865"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950153865"}]},"ts":"1733950153865"} 2024-12-11T20:49:13,869 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:49:13,871 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:49:13,871 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950153871"}]},"ts":"1733950153871"} 2024-12-11T20:49:13,873 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-11T20:49:13,873 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:49:13,875 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:49:13,875 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:49:13,875 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:49:13,875 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number 
of racks=1 2024-12-11T20:49:13,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, ASSIGN}] 2024-12-11T20:49:13,877 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, ASSIGN 2024-12-11T20:49:13,877 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, ASSIGN 2024-12-11T20:49:13,881 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:49:13,881 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:49:13,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T20:49:13,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:13,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T20:49:14,031 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
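Editor's note: the entries above trace the master running CreateTableProcedure (pid=45) for 'testtb-testExportWithTargetName' and fanning out the two region-assignment subprocedures. For readers who want to issue an equivalent request outside the test harness, the following is a minimal, hedged sketch using the standard HBase Java Admin API; the configuration source, the `main` wrapper, and the single split key '1' are assumptions inferred from the logged table layout, not code taken from the test itself.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateExportTableSketch {
  public static void main(String[] args) throws IOException {
    // Picks up hbase-site.xml from the classpath (assumed deployment detail).
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setRegionReplication(1)                       // REGION_REPLICATION => '1'
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)                     // VERSIONS => '1'
                      .setBlocksize(65536)                   // BLOCKSIZE => 64KB
                      .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });              // one split key => the two regions seen in the log
    }
  }
}
```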
2024-12-11T20:49:14,032 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=f7f4b125b358ba36e7f14b1aaa372126, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:49:14,032 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d8d8acc9953a10e1b739b6c090a7f909, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:14,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, ASSIGN because future has completed 2024-12-11T20:49:14,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:49:14,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, ASSIGN because future has completed 2024-12-11T20:49:14,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:14,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T20:49:14,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45483, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:49:14,189 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:14,190 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:14,190 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => d8d8acc9953a10e1b739b6c090a7f909, NAME => 'testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:49:14,190 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => f7f4b125b358ba36e7f14b1aaa372126, NAME => 'testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:49:14,190 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 
service=AccessControlService 2024-12-11T20:49:14,190 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. service=AccessControlService 2024-12-11T20:49:14,190 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:49:14,191 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,191 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,193 INFO [StoreOpener-d8d8acc9953a10e1b739b6c090a7f909-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,193 INFO [StoreOpener-f7f4b125b358ba36e7f14b1aaa372126-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family cf of region f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,194 INFO [StoreOpener-f7f4b125b358ba36e7f14b1aaa372126-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7f4b125b358ba36e7f14b1aaa372126 columnFamilyName cf 2024-12-11T20:49:14,194 INFO [StoreOpener-d8d8acc9953a10e1b739b6c090a7f909-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8d8acc9953a10e1b739b6c090a7f909 columnFamilyName cf 2024-12-11T20:49:14,194 DEBUG [StoreOpener-f7f4b125b358ba36e7f14b1aaa372126-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:14,194 DEBUG [StoreOpener-d8d8acc9953a10e1b739b6c090a7f909-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:14,195 INFO [StoreOpener-d8d8acc9953a10e1b739b6c090a7f909-1 {}] regionserver.HStore(327): Store=d8d8acc9953a10e1b739b6c090a7f909/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:14,195 INFO [StoreOpener-f7f4b125b358ba36e7f14b1aaa372126-1 {}] regionserver.HStore(327): Store=f7f4b125b358ba36e7f14b1aaa372126/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:14,195 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,195 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,196 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,196 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 
{event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,196 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,196 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,197 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,197 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,197 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,197 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,198 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,198 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,201 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:14,201 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened d8d8acc9953a10e1b739b6c090a7f909; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66866942, jitterRate=-0.0036049187183380127}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:14,202 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,203 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for d8d8acc9953a10e1b739b6c090a7f909: Running coprocessor pre-open hook at 1733950154191Writing region info on filesystem at 1733950154191Initializing all the Stores at 1733950154192 (+1 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950154192Cleaning up temporary data from old regions at 1733950154197 (+5 ms)Running coprocessor post-open hooks at 1733950154202 (+5 ms)Region opened successfully at 1733950154202 2024-12-11T20:49:14,203 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:14,203 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909., pid=49, masterSystemTime=1733950154186 2024-12-11T20:49:14,203 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened f7f4b125b358ba36e7f14b1aaa372126; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73723682, jitterRate=0.09856846928596497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:14,204 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,204 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for f7f4b125b358ba36e7f14b1aaa372126: Running coprocessor pre-open hook at 1733950154191Writing region info on filesystem at 1733950154191Initializing all the Stores at 1733950154192 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950154192Cleaning up temporary data from old regions at 1733950154197 (+5 ms)Running coprocessor post-open hooks at 1733950154204 (+7 ms)Region opened successfully at 1733950154204 2024-12-11T20:49:14,204 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., pid=48, masterSystemTime=1733950154186 2024-12-11T20:49:14,206 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:14,206 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
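Editor's note: at this point both regions (d8d8acc9953a10e1b739b6c090a7f909 and f7f4b125b358ba36e7f14b1aaa372126) have been opened on their regionservers and the master is about to mark them OPEN in hbase:meta. A hedged client-side way to confirm the same state, assuming an already-open Connection, is to ask the RegionLocator for all region locations of the table; the class and method names below are illustrative, only the HBase client calls themselves are standard API.

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionAssignmentCheck {
  // Returns true once every region of the table has a server location recorded in hbase:meta.
  public static boolean allRegionsAssigned(Connection conn, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      // For 'testtb-testExportWithTargetName' two locations are expected,
      // one per region produced by the single split key '1'.
      return !locations.isEmpty()
          && locations.stream().allMatch(l -> l != null && l.getServerName() != null);
    }
  }
}
```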
2024-12-11T20:49:14,206 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d8d8acc9953a10e1b739b6c090a7f909, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:14,207 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:14,207 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:14,208 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=f7f4b125b358ba36e7f14b1aaa372126, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:49:14,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:14,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:49:14,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-11T20:49:14,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820 in 175 msec 2024-12-11T20:49:14,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-11T20:49:14,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, ASSIGN in 336 msec 2024-12-11T20:49:14,215 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112 in 177 msec 2024-12-11T20:49:14,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-11T20:49:14,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, ASSIGN in 339 msec 2024-12-11T20:49:14,219 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:49:14,219 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950154219"}]},"ts":"1733950154219"} 2024-12-11T20:49:14,221 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-11T20:49:14,223 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:49:14,223 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-11T20:49:14,227 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T20:49:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:14,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:14,296 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:14,296 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:14,297 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:14,297 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:14,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 508 msec 2024-12-11T20:49:14,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T20:49:14,423 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T20:49:14,423 DEBUG [Time-limited 
test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-11T20:49:14,423 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:14,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-11T20:49:14,427 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:14,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-12-11T20:49:14,427 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T20:49:14,431 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T20:49:14,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950154431 (current time:1733950154431). 2024-12-11T20:49:14,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:49:14,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T20:49:14,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:49:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29d5196, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:14,433 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:14,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:14,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:14,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@164b20f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:14,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:14,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,435 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:14,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fef3a88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:14,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:14,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,439 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
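The entries above show MasterRpcServices accepting a FLUSH-type snapshot request for table testtb-testExportWithTargetName (ss=emptySnaptb0-testExportWithTargetName) and the master opening short-lived client connections while validating it. For reference, a minimal client-side sketch of how such a request could be issued through the HBase Admin API; the configuration source and class name are illustrative assumptions, and only the snapshot and table names are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous FLUSH snapshot, mirroring the { ss=... type=FLUSH ttl=0 } request in the log.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
                         TableName.valueOf("testtb-testExportWithTargetName"),
                         SnapshotType.FLUSH);
        }
      }
    }

Admin.snapshot blocks until the master-side SnapshotProcedure (pid=50 in the log below) completes or fails.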
2024-12-11T20:49:14,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:14,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,439 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b36bd9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:14,441 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:14,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:14,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:14,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502aa8f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:14,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:14,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,442 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:14,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18a9cad6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:14,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:14,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51302, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53130, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,451 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:49:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T20:49:14,452 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T20:49:14,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T20:49:14,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T20:49:14,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T20:49:14,455 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:49:14,457 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:49:14,460 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:49:14,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741910_1086 (size=167) 2024-12-11T20:49:14,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741910_1086 (size=167) 2024-12-11T20:49:14,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741910_1086 (size=167) 2024-12-11T20:49:14,475 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:49:14,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126}] 2024-12-11T20:49:14,476 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,477 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,563 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T20:49:14,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-11T20:49:14,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for d8d8acc9953a10e1b739b6c090a7f909: 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for f7f4b125b358ba36e7f14b1aaa372126: 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:49:14,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:49:14,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741911_1087 (size=70) 2024-12-11T20:49:14,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741911_1087 (size=70) 2024-12-11T20:49:14,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741911_1087 (size=70) 2024-12-11T20:49:14,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:14,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-11T20:49:14,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-11T20:49:14,641 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,642 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 in 168 msec 2024-12-11T20:49:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741912_1088 (size=70) 2024-12-11T20:49:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741912_1088 (size=70) 2024-12-11T20:49:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741912_1088 (size=70) 2024-12-11T20:49:14,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
2024-12-11T20:49:14,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-11T20:49:14,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-11T20:49:14,649 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,649 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-11T20:49:14,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 in 175 msec 2024-12-11T20:49:14,651 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:49:14,652 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:49:14,655 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:49:14,655 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:14,656 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741913_1089 (size=549) 2024-12-11T20:49:14,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741913_1089 (size=549) 2024-12-11T20:49:14,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741913_1089 (size=549) 2024-12-11T20:49:14,668 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:49:14,674 
INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:49:14,675 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:14,677 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:49:14,677 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T20:49:14,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 224 msec 2024-12-11T20:49:14,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T20:49:14,773 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T20:49:14,779 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='05773ca6b4d1227f801fa72ff1a488568', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:49:14,780 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='1c88206ab51cc10e933399e54a6916560', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,782 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2b785ed1e0a230a2f54e62e876c3fdbc3', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,783 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='380966e11c8340f92c180fe5c487081dd', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,784 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='46a6a5aa193051980f0fbe80422b3b59c', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,785 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='570f56d427965553514cd60694f9c4f05', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,786 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='666e3fa1c86482d11eea0d9c7a44e55e4', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,786 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,787 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='1c9c5a1bf4fe64bb7c152e65112e0057', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:49:14,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:49:14,795 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T20:49:14,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-11T20:49:14,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
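The HRegion(8528) entries above record the test loading rows into both regions with the WAL disabled, which is why the server warns that data may be lost on a crash. A minimal sketch of a client write that would take the same path, assuming a plain Put with Durability.SKIP_WAL; the row key and value are hypothetical, while the table name and the cf:q column are taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));  // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);        // skip the WAL append; may trigger the "WAL disabled" warning above
          table.put(put);
        }
      }
    }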
2024-12-11T20:49:14,798 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:14,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T20:49:14,806 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T20:49:14,815 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T20:49:14,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T20:49:14,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950154818 (current time:1733950154818). 2024-12-11T20:49:14,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:49:14,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T20:49:14,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:49:14,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b6df3ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:14,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:14,820 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:14,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:14,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:14,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1234d656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-11T20:49:14,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:14,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:14,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,821 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:14,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c9abdf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:14,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:14,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,824 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51310, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,826 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:49:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,826 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a9a21f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:14,828 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:14,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:14,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:14,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cf87d02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:14,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:14,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,829 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:14,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17c9210f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:14,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:14,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:14,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,833 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:14,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:14,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:14,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:49:14,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:14,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:14,838 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:14,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T20:49:14,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
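SnapshotManager(810) above reports that no snapshot with this name exists before the snaptb0-testExportWithTargetName procedure is stored. A rough client-side analogue of that pre-check, assuming the Admin#listSnapshots and Admin#snapshot calls; the check itself is an illustrative assumption rather than something the test performs:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotIfAbsentSketch {
      public static void main(String[] args) throws Exception {
        String name = "snaptb0-testExportWithTargetName";
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> existing = admin.listSnapshots();
          boolean alreadyTaken = existing.stream().anyMatch(s -> s.getName().equals(name));
          if (!alreadyTaken) {  // only request the snapshot if none with this name exists yet
            admin.snapshot(name, TableName.valueOf("testtb-testExportWithTargetName"), SnapshotType.FLUSH);
          }
        }
      }
    }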
2024-12-11T20:49:14,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T20:49:14,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T20:49:14,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T20:49:14,842 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:49:14,843 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:49:14,846 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:49:14,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741914_1090 (size=162) 2024-12-11T20:49:14,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741914_1090 (size=162) 2024-12-11T20:49:14,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741914_1090 (size=162) 2024-12-11T20:49:14,857 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:49:14,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126}] 2024-12-11T20:49:14,858 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:14,859 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:14,953 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T20:49:15,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-11T20:49:15,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:15,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing f7f4b125b358ba36e7f14b1aaa372126 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T20:49:15,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-11T20:49:15,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:15,011 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing d8d8acc9953a10e1b739b6c090a7f909 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T20:49:15,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/.tmp/cf/78dc97ff47b443b1b08a54d81698da62 is 71, key is 1c9c5a1bf4fe64bb7c152e65112e0057/cf:q/1733950154792/Put/seqid=0 2024-12-11T20:49:15,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741915_1091 (size=8394) 2024-12-11T20:49:15,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741915_1091 (size=8394) 2024-12-11T20:49:15,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/.tmp/cf/9e8b204d5c28443598198168d1d9d487 is 71, key is 06d8ef3cbc78b16321325fe504d8806e/cf:q/1733950154788/Put/seqid=0 2024-12-11T20:49:15,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741915_1091 (size=8394) 2024-12-11T20:49:15,042 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/.tmp/cf/78dc97ff47b443b1b08a54d81698da62 
2024-12-11T20:49:15,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741916_1092 (size=5216) 2024-12-11T20:49:15,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741916_1092 (size=5216) 2024-12-11T20:49:15,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741916_1092 (size=5216) 2024-12-11T20:49:15,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/.tmp/cf/9e8b204d5c28443598198168d1d9d487 2024-12-11T20:49:15,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/.tmp/cf/9e8b204d5c28443598198168d1d9d487 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487 2024-12-11T20:49:15,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487, entries=2, sequenceid=6, filesize=5.1 K 2024-12-11T20:49:15,063 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for d8d8acc9953a10e1b739b6c090a7f909 in 52ms, sequenceid=6, compaction requested=false 2024-12-11T20:49:15,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for d8d8acc9953a10e1b739b6c090a7f909: 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. for snaptb0-testExportWithTargetName completed. 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487] hfiles 2024-12-11T20:49:15,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487 for snapshot=snaptb0-testExportWithTargetName 2024-12-11T20:49:15,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/.tmp/cf/78dc97ff47b443b1b08a54d81698da62 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62 2024-12-11T20:49:15,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62, entries=48, sequenceid=6, filesize=8.2 K 2024-12-11T20:49:15,073 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for f7f4b125b358ba36e7f14b1aaa372126 in 63ms, sequenceid=6, compaction requested=false 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for f7f4b125b358ba36e7f14b1aaa372126: 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. for snaptb0-testExportWithTargetName completed. 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62] hfiles 2024-12-11T20:49:15,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62 for snapshot=snaptb0-testExportWithTargetName 2024-12-11T20:49:15,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741917_1093 (size=109) 2024-12-11T20:49:15,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741917_1093 (size=109) 2024-12-11T20:49:15,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741917_1093 (size=109) 2024-12-11T20:49:15,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
2024-12-11T20:49:15,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-11T20:49:15,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-11T20:49:15,077 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:15,077 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:15,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d8d8acc9953a10e1b739b6c090a7f909 in 221 msec 2024-12-11T20:49:15,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741918_1094 (size=109) 2024-12-11T20:49:15,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741918_1094 (size=109) 2024-12-11T20:49:15,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741918_1094 (size=109) 2024-12-11T20:49:15,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 
2024-12-11T20:49:15,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-11T20:49:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-11T20:49:15,107 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:15,107 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:15,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-11T20:49:15,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7f4b125b358ba36e7f14b1aaa372126 in 251 msec 2024-12-11T20:49:15,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:49:15,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:49:15,111 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:49:15,111 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-11T20:49:15,111 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-11T20:49:15,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741919_1095 (size=627) 2024-12-11T20:49:15,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741919_1095 (size=627) 2024-12-11T20:49:15,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741919_1095 (size=627) 2024-12-11T20:49:15,122 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:49:15,128 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:49:15,129 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T20:49:15,130 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:49:15,130 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T20:49:15,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 291 msec 2024-12-11T20:49:15,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T20:49:15,163 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T20:49:15,163 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163 2024-12-11T20:49:15,163 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:15,208 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:15,209 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T20:49:15,211 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-12-11T20:49:15,221 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T20:49:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741921_1097 (size=627) 2024-12-11T20:49:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741921_1097 (size=627) 2024-12-11T20:49:15,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741920_1096 (size=162) 2024-12-11T20:49:15,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741920_1096 (size=162) 2024-12-11T20:49:15,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741921_1097 (size=627) 2024-12-11T20:49:15,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741920_1096 (size=162) 2024-12-11T20:49:15,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741922_1098 (size=154) 2024-12-11T20:49:15,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741922_1098 (size=154) 2024-12-11T20:49:15,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741922_1098 (size=154) 2024-12-11T20:49:15,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:15,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:15,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-2255684381670265832.jar 2024-12-11T20:49:16,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,238 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,298 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-6732818341285031742.jar 2024-12-11T20:49:16,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:16,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:49:16,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:49:16,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:49:16,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-12-11T20:49:16,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:49:16,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:49:16,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:49:16,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:49:16,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:49:16,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:49:16,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:49:16,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:16,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:16,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:16,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:16,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-11T20:49:16,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:16,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:16,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741923_1099 (size=131440) 2024-12-11T20:49:16,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741923_1099 (size=131440) 2024-12-11T20:49:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741923_1099 (size=131440) 2024-12-11T20:49:16,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741924_1100 (size=443172) 2024-12-11T20:49:16,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741924_1100 (size=443172) 2024-12-11T20:49:16,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741924_1100 (size=443172) 2024-12-11T20:49:16,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741925_1101 (size=4188619) 2024-12-11T20:49:16,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741925_1101 (size=4188619) 2024-12-11T20:49:16,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741925_1101 (size=4188619) 2024-12-11T20:49:16,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741926_1102 (size=1323991) 2024-12-11T20:49:16,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741926_1102 (size=1323991) 2024-12-11T20:49:16,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741926_1102 (size=1323991) 2024-12-11T20:49:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741927_1103 (size=903544) 2024-12-11T20:49:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741927_1103 (size=903544) 2024-12-11T20:49:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741927_1103 (size=903544) 2024-12-11T20:49:16,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741928_1104 (size=8360360) 2024-12-11T20:49:16,470 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741928_1104 (size=8360360) 2024-12-11T20:49:16,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741928_1104 (size=8360360) 2024-12-11T20:49:16,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741929_1105 (size=1877034) 2024-12-11T20:49:16,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741929_1105 (size=1877034) 2024-12-11T20:49:16,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741929_1105 (size=1877034) 2024-12-11T20:49:16,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741930_1106 (size=6426228) 2024-12-11T20:49:16,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741930_1106 (size=6426228) 2024-12-11T20:49:16,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741930_1106 (size=6426228) 2024-12-11T20:49:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741931_1107 (size=77835) 2024-12-11T20:49:16,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741931_1107 (size=77835) 2024-12-11T20:49:16,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741931_1107 (size=77835) 2024-12-11T20:49:16,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741932_1108 (size=30949) 2024-12-11T20:49:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741932_1108 (size=30949) 2024-12-11T20:49:16,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741932_1108 (size=30949) 2024-12-11T20:49:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741933_1109 (size=1597213) 2024-12-11T20:49:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741933_1109 (size=1597213) 2024-12-11T20:49:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741933_1109 (size=1597213) 2024-12-11T20:49:16,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741934_1110 (size=4695811) 2024-12-11T20:49:16,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741934_1110 (size=4695811) 2024-12-11T20:49:16,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741934_1110 (size=4695811) 
2024-12-11T20:49:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741935_1111 (size=232957) 2024-12-11T20:49:16,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741935_1111 (size=232957) 2024-12-11T20:49:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741935_1111 (size=232957) 2024-12-11T20:49:16,887 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0001_000001 (auth:SIMPLE) from 127.0.0.1:36822 2024-12-11T20:49:16,913 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000001/launch_container.sh] 2024-12-11T20:49:16,914 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000001/container_tokens] 2024-12-11T20:49:16,914 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0001/container_1733950008312_0001_01_000001/sysfs] 2024-12-11T20:49:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741936_1112 (size=127628) 2024-12-11T20:49:17,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741936_1112 (size=127628) 2024-12-11T20:49:17,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741936_1112 (size=127628) 2024-12-11T20:49:17,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741937_1113 (size=20406) 2024-12-11T20:49:17,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741937_1113 (size=20406) 2024-12-11T20:49:17,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741937_1113 (size=20406) 2024-12-11T20:49:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741938_1114 (size=5175431) 2024-12-11T20:49:17,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741938_1114 (size=5175431) 2024-12-11T20:49:17,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to 
blk_1073741938_1114 (size=5175431) 2024-12-11T20:49:17,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741939_1115 (size=217634) 2024-12-11T20:49:17,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741939_1115 (size=217634) 2024-12-11T20:49:17,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741939_1115 (size=217634) 2024-12-11T20:49:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741940_1116 (size=1832290) 2024-12-11T20:49:17,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741940_1116 (size=1832290) 2024-12-11T20:49:17,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741940_1116 (size=1832290) 2024-12-11T20:49:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741941_1117 (size=322274) 2024-12-11T20:49:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741941_1117 (size=322274) 2024-12-11T20:49:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741941_1117 (size=322274) 2024-12-11T20:49:17,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741942_1118 (size=503880) 2024-12-11T20:49:17,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741942_1118 (size=503880) 2024-12-11T20:49:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741942_1118 (size=503880) 2024-12-11T20:49:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741943_1119 (size=29229) 2024-12-11T20:49:17,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741943_1119 (size=29229) 2024-12-11T20:49:17,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741943_1119 (size=29229) 2024-12-11T20:49:17,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741944_1120 (size=24096) 2024-12-11T20:49:17,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741944_1120 (size=24096) 2024-12-11T20:49:17,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741944_1120 (size=24096) 2024-12-11T20:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741945_1121 (size=111872) 2024-12-11T20:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added 
to blk_1073741945_1121 (size=111872) 2024-12-11T20:49:17,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741945_1121 (size=111872) 2024-12-11T20:49:17,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741946_1122 (size=45609) 2024-12-11T20:49:17,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741946_1122 (size=45609) 2024-12-11T20:49:17,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741946_1122 (size=45609) 2024-12-11T20:49:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741947_1123 (size=136454) 2024-12-11T20:49:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741947_1123 (size=136454) 2024-12-11T20:49:17,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741947_1123 (size=136454) 2024-12-11T20:49:17,276 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:49:17,280 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-11T20:49:17,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.2 K 2024-12-11T20:49:17,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.1 K 2024-12-11T20:49:17,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741948_1124 (size=445) 2024-12-11T20:49:17,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741948_1124 (size=445) 2024-12-11T20:49:17,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741948_1124 (size=445) 2024-12-11T20:49:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741949_1125 (size=21) 2024-12-11T20:49:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741949_1125 (size=21) 2024-12-11T20:49:17,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741949_1125 (size=21) 2024-12-11T20:49:17,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741950_1126 (size=304003) 2024-12-11T20:49:17,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741950_1126 (size=304003) 2024-12-11T20:49:17,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741950_1126 (size=304003) 2024-12-11T20:49:17,358 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to 
start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:49:17,359 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:49:17,466 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:44552 2024-12-11T20:49:18,204 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:49:23,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T20:49:23,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-11T20:49:25,056 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:52706 2024-12-11T20:49:25,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741951_1127 (size=349701) 2024-12-11T20:49:25,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741951_1127 (size=349701) 2024-12-11T20:49:25,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741951_1127 (size=349701) 2024-12-11T20:49:27,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:53940 2024-12-11T20:49:27,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:44894 2024-12-11T20:49:31,783 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T20:49:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741952_1128 (size=5216) 2024-12-11T20:49:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741952_1128 (size=5216) 2024-12-11T20:49:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741952_1128 (size=5216) 2024-12-11T20:49:35,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741954_1130 (size=8394) 2024-12-11T20:49:35,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741954_1130 (size=8394) 2024-12-11T20:49:35,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741954_1130 (size=8394) 2024-12-11T20:49:35,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741953_1129 (size=22163) 2024-12-11T20:49:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741953_1129 (size=22163) 2024-12-11T20:49:35,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741953_1129 (size=22163) 2024-12-11T20:49:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741955_1131 (size=465) 2024-12-11T20:49:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741955_1131 (size=465) 2024-12-11T20:49:35,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741955_1131 (size=465) 2024-12-11T20:49:35,811 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000002/launch_container.sh] 2024-12-11T20:49:35,811 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000002/container_tokens] 2024-12-11T20:49:35,811 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000002/sysfs] 2024-12-11T20:49:35,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741956_1132 (size=22163) 
2024-12-11T20:49:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741956_1132 (size=22163) 2024-12-11T20:49:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741956_1132 (size=22163) 2024-12-11T20:49:35,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741957_1133 (size=349701) 2024-12-11T20:49:35,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741957_1133 (size=349701) 2024-12-11T20:49:35,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741957_1133 (size=349701) 2024-12-11T20:49:35,991 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:60378 2024-12-11T20:49:37,898 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:49:37,899 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:49:37,961 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-11T20:49:37,961 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:49:37,963 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:49:37,963 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T20:49:37,964 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-11T20:49:37,964 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-11T20:49:37,964 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/testExportWithTargetName 2024-12-11T20:49:37,964 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-11T20:49:37,964 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950155163/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-11T20:49:37,997 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-11T20:49:38,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T20:49:38,012 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950178011"}]},"ts":"1733950178011"} 2024-12-11T20:49:38,018 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-11T20:49:38,018 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-11T20:49:38,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-11T20:49:38,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, UNASSIGN}] 2024-12-11T20:49:38,025 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, UNASSIGN 2024-12-11T20:49:38,026 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, UNASSIGN 2024-12-11T20:49:38,027 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=d8d8acc9953a10e1b739b6c090a7f909, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:38,028 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=f7f4b125b358ba36e7f14b1aaa372126, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:49:38,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, UNASSIGN because future has completed 2024-12-11T20:49:38,031 DEBUG [PEWorker-2 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:38,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:38,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, UNASSIGN because future has completed 2024-12-11T20:49:38,034 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:49:38,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:49:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T20:49:38,186 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:38,187 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:38,187 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing d8d8acc9953a10e1b739b6c090a7f909, disabling compactions & flushes 2024-12-11T20:49:38,187 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:38,187 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 2024-12-11T20:49:38,187 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. after waiting 0 ms 2024-12-11T20:49:38,187 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
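(The export that completes earlier in this window — "Finalize the Snapshot Export" through "Export Completed: testExportWithTargetName" — copies snapshot snaptb0-testExportWithTargetName to a second filesystem location and stores it there under the new name testExportWithTargetName. For orientation only, a minimal sketch of driving that tool from Java is given below; the destination URI is a placeholder, the ExportSnapshot option set should be checked against the usage text of the HBase version in use, and this is not a reproduction of the test code itself.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy snaptb0-testExportWithTargetName to another filesystem and store it
    // there under the name testExportWithTargetName (-target), which is the
    // rename visible in the verification entries above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",
        "-copy-to", "hdfs://example-namenode:8020/export-test"  // placeholder destination
    });
    System.exit(rc);
  }
}

(The same operation is normally also available from the shell as hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot with the same options.)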
2024-12-11T20:49:38,213 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:38,214 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:49:38,214 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing f7f4b125b358ba36e7f14b1aaa372126, disabling compactions & flushes 2024-12-11T20:49:38,214 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:38,214 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:38,214 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. after waiting 0 ms 2024-12-11T20:49:38,214 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 2024-12-11T20:49:38,308 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:49:38,309 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:38,309 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909. 
2024-12-11T20:49:38,309 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for d8d8acc9953a10e1b739b6c090a7f909: Waiting for close lock at 1733950178187Running coprocessor pre-close hooks at 1733950178187Disabling compacts and flushes for region at 1733950178187Disabling writes for close at 1733950178187Writing region close event to WAL at 1733950178253 (+66 ms)Running coprocessor post-close hooks at 1733950178309 (+56 ms)Closed at 1733950178309 2024-12-11T20:49:38,313 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:38,314 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=d8d8acc9953a10e1b739b6c090a7f909, regionState=CLOSED 2024-12-11T20:49:38,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:38,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-11T20:49:38,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure d8d8acc9953a10e1b739b6c090a7f909, server=5be53b084ac7,40355,1733949994820 in 288 msec 2024-12-11T20:49:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T20:49:38,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d8d8acc9953a10e1b739b6c090a7f909, UNASSIGN in 300 msec 2024-12-11T20:49:38,341 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:49:38,344 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:49:38,344 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126. 
2024-12-11T20:49:38,344 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for f7f4b125b358ba36e7f14b1aaa372126: Waiting for close lock at 1733950178214Running coprocessor pre-close hooks at 1733950178214Disabling compacts and flushes for region at 1733950178214Disabling writes for close at 1733950178214Writing region close event to WAL at 1733950178295 (+81 ms)Running coprocessor post-close hooks at 1733950178344 (+49 ms)Closed at 1733950178344 2024-12-11T20:49:38,348 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:38,349 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=f7f4b125b358ba36e7f14b1aaa372126, regionState=CLOSED 2024-12-11T20:49:38,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:49:38,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-11T20:49:38,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure f7f4b125b358ba36e7f14b1aaa372126, server=5be53b084ac7,39865,1733949995112 in 321 msec 2024-12-11T20:49:38,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-11T20:49:38,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7f4b125b358ba36e7f14b1aaa372126, UNASSIGN in 335 msec 2024-12-11T20:49:38,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-11T20:49:38,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 344 msec 2024-12-11T20:49:38,368 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950178367"}]},"ts":"1733950178367"} 2024-12-11T20:49:38,380 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-11T20:49:38,380 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-11T20:49:38,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 385 msec 2024-12-11T20:49:38,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T20:49:38,634 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T20:49:38,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 
{}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-11T20:49:38,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,644 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-11T20:49:38,648 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,652 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-11T20:49:38,658 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:38,659 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:38,663 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/recovered.edits] 2024-12-11T20:49:38,664 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/recovered.edits] 2024-12-11T20:49:38,669 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/cf/78dc97ff47b443b1b08a54d81698da62 2024-12-11T20:49:38,670 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487 to 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/cf/9e8b204d5c28443598198168d1d9d487 2024-12-11T20:49:38,690 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126/recovered.edits/9.seqid 2024-12-11T20:49:38,694 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909/recovered.edits/9.seqid 2024-12-11T20:49:38,696 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/f7f4b125b358ba36e7f14b1aaa372126 2024-12-11T20:49:38,698 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithTargetName/d8d8acc9953a10e1b739b6c090a7f909 2024-12-11T20:49:38,698 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-11T20:49:38,702 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,708 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-11T20:49:38,714 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-11T20:49:38,717 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T20:49:38,717 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 
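(The entries from 20:49:37,997 onward are the master-side view of the test's cleanup: the client disables testtb-testExportWithTargetName, which runs DisableTableProcedure pid=56 with its CloseTableRegions/UNASSIGN/CloseRegion children, and then deletes it, which runs DeleteTableProcedure pid=62 and archives the region directories shown here before the snapshots are removed. A minimal client-side sketch of that sequence, assuming a reachable cluster configuration on the classpath:)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportedTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      admin.disableTable(table);   // master runs a DisableTableProcedure, closing both regions
      admin.deleteTable(table);    // master runs a DeleteTableProcedure; HFiles move to the archive dir
      // Snapshots outlive the table and are removed separately, as in the delete RPCs below.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}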
2024-12-11T20:49:38,717 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950178717"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:38,717 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950178717"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:38,721 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:49:38,721 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d8d8acc9953a10e1b739b6c090a7f909, NAME => 'testtb-testExportWithTargetName,,1733950153787.d8d8acc9953a10e1b739b6c090a7f909.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f7f4b125b358ba36e7f14b1aaa372126, NAME => 'testtb-testExportWithTargetName,1,1733950153787.f7f4b125b358ba36e7f14b1aaa372126.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:49:38,722 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-11T20:49:38,722 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950178722"}]},"ts":"9223372036854775807"} 2024-12-11T20:49:38,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,726 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T20:49:38,726 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T20:49:38,726 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T20:49:38,726 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-11T20:49:38,728 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure 
table=testtb-testExportWithTargetName 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 88 msec 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:38,733 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-11T20:49:38,733 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-11T20:49:38,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:38,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-11T20:49:38,737 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-11T20:49:38,737 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T20:49:38,750 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T20:49:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-11T20:49:38,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T20:49:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-11T20:49:38,815 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=783 (was 764) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34025 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2031 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:44854 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:40711 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 110977) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40711 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:47422 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2088350392_1 at /127.0.0.1:44828 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:34025 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:51440 [Waiting for operation #4] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=796 (was 781) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=869 (was 664) - SystemLoadAverage LEAK? -, ProcessCount=26 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1413 (was 704) - AvailableMemoryMB LEAK? - 2024-12-11T20:49:38,815 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-11T20:49:38,893 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=783, OpenFileDescriptor=796, MaxFileDescriptor=1048576, SystemLoadAverage=869, ProcessCount=26, AvailableMemoryMB=1408 2024-12-11T20:49:38,893 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-11T20:49:38,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:49:38,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:49:38,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:49:38,901 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:38,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-11T20:49:38,904 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:49:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T20:49:38,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741958_1134 (size=404) 2024-12-11T20:49:38,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741958_1134 (size=404) 2024-12-11T20:49:38,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741958_1134 (size=404) 2024-12-11T20:49:38,970 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d1b71d1f925f71479f96b8b2219aa395, NAME => 'testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:38,988 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6d795aa0ba7ea8a1773cf88e970f7989, NAME => 'testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T20:49:39,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741959_1135 (size=65) 2024-12-11T20:49:39,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741959_1135 (size=65) 2024-12-11T20:49:39,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741959_1135 (size=65) 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing d1b71d1f925f71479f96b8b2219aa395, disabling compactions & flushes 2024-12-11T20:49:39,063 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. after waiting 0 ms 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,063 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,063 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for d1b71d1f925f71479f96b8b2219aa395: Waiting for close lock at 1733950179063Disabling compacts and flushes for region at 1733950179063Disabling writes for close at 1733950179063Writing region close event to WAL at 1733950179063Closed at 1733950179063 2024-12-11T20:49:39,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741960_1136 (size=65) 2024-12-11T20:49:39,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741960_1136 (size=65) 2024-12-11T20:49:39,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741960_1136 (size=65) 2024-12-11T20:49:39,087 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:39,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 6d795aa0ba7ea8a1773cf88e970f7989, disabling compactions & flushes 2024-12-11T20:49:39,088 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 
after waiting 0 ms 2024-12-11T20:49:39,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,088 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6d795aa0ba7ea8a1773cf88e970f7989: Waiting for close lock at 1733950179088Disabling compacts and flushes for region at 1733950179088Disabling writes for close at 1733950179088Writing region close event to WAL at 1733950179088Closed at 1733950179088 2024-12-11T20:49:39,089 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:49:39,090 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950179089"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950179089"}]},"ts":"1733950179089"} 2024-12-11T20:49:39,090 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950179089"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950179089"}]},"ts":"1733950179089"} 2024-12-11T20:49:39,093 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
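(The create issued at 20:49:38,897 — 'testtb-testExportWithResetTtl' with a single 'cf' family and two regions split at row key '1' — is the standard pre-split table creation path; the "Added 2 regions to meta" entry above corresponds to the CREATE_TABLE_ADD_TO_META step. A minimal sketch of the equivalent client call, reusing an Admin handle obtained as in the previous sketch:)

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
  static void createTable(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    // A single split point ('1') yields the two regions seen in the ADD_TO_META step:
    // [ '', '1' ) and [ '1', '' ).
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}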
2024-12-11T20:49:39,095 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:49:39,095 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950179095"}]},"ts":"1733950179095"} 2024-12-11T20:49:39,103 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T20:49:39,105 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:49:39,107 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:49:39,107 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:49:39,107 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:49:39,107 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:49:39,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, ASSIGN}] 2024-12-11T20:49:39,110 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, ASSIGN 2024-12-11T20:49:39,116 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:49:39,120 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, ASSIGN 2024-12-11T20:49:39,121 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:49:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T20:49:39,267 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T20:49:39,267 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=d1b71d1f925f71479f96b8b2219aa395, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:49:39,267 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=6d795aa0ba7ea8a1773cf88e970f7989, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:39,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, ASSIGN because future has completed 2024-12-11T20:49:39,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:49:39,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, ASSIGN because future has completed 2024-12-11T20:49:39,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:49:39,440 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,441 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 6d795aa0ba7ea8a1773cf88e970f7989, NAME => 'testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:49:39,441 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. service=AccessControlService 2024-12-11T20:49:39,442 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:49:39,442 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,442 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:39,442 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,442 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,452 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,452 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => d1b71d1f925f71479f96b8b2219aa395, NAME => 'testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:49:39,453 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. service=AccessControlService 2024-12-11T20:49:39,453 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:49:39,453 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,453 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:39,453 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,454 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,454 INFO [StoreOpener-6d795aa0ba7ea8a1773cf88e970f7989-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,459 INFO [StoreOpener-d1b71d1f925f71479f96b8b2219aa395-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,460 INFO [StoreOpener-6d795aa0ba7ea8a1773cf88e970f7989-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d795aa0ba7ea8a1773cf88e970f7989 columnFamilyName cf 2024-12-11T20:49:39,461 DEBUG [StoreOpener-6d795aa0ba7ea8a1773cf88e970f7989-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:39,461 INFO [StoreOpener-6d795aa0ba7ea8a1773cf88e970f7989-1 {}] regionserver.HStore(327): Store=6d795aa0ba7ea8a1773cf88e970f7989/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:39,461 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,463 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,464 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,464 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,464 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,473 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,482 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:39,483 INFO [StoreOpener-d1b71d1f925f71479f96b8b2219aa395-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d1b71d1f925f71479f96b8b2219aa395 columnFamilyName cf 2024-12-11T20:49:39,483 DEBUG [StoreOpener-d1b71d1f925f71479f96b8b2219aa395-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:39,483 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 6d795aa0ba7ea8a1773cf88e970f7989; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71231972, jitterRate=0.06143909692764282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:39,484 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:39,485 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 6d795aa0ba7ea8a1773cf88e970f7989: Running coprocessor pre-open hook at 1733950179442Writing region info on filesystem at 1733950179443 (+1 ms)Initializing all the Stores at 1733950179449 (+6 ms)Instantiating store for column family {NAME => 
'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950179449Cleaning up temporary data from old regions at 1733950179464 (+15 ms)Running coprocessor post-open hooks at 1733950179484 (+20 ms)Region opened successfully at 1733950179485 (+1 ms) 2024-12-11T20:49:39,488 INFO [StoreOpener-d1b71d1f925f71479f96b8b2219aa395-1 {}] regionserver.HStore(327): Store=d1b71d1f925f71479f96b8b2219aa395/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:39,489 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,490 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,495 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989., pid=66, masterSystemTime=1733950179433 2024-12-11T20:49:39,501 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,502 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:39,502 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 
2024-12-11T20:49:39,516 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=6d795aa0ba7ea8a1773cf88e970f7989, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:49:39,520 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:39,520 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened d1b71d1f925f71479f96b8b2219aa395; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70271856, jitterRate=0.047132253646850586}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:39,521 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:39,521 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for d1b71d1f925f71479f96b8b2219aa395: Running coprocessor pre-open hook at 1733950179454Writing region info on filesystem at 1733950179454Initializing all the Stores at 1733950179458 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950179458Cleaning up temporary data from old regions at 1733950179492 (+34 ms)Running coprocessor post-open hooks at 1733950179521 (+29 ms)Region opened successfully at 1733950179521 2024-12-11T20:49:39,522 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395., pid=67, masterSystemTime=1733950179433 2024-12-11T20:49:39,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:49:39,526 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:39,526 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 
2024-12-11T20:49:39,527 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=d1b71d1f925f71479f96b8b2219aa395, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:49:39,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T20:49:39,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-12-11T20:49:39,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820 in 251 msec 2024-12-11T20:49:39,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, ASSIGN in 431 msec 2024-12-11T20:49:39,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:49:39,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-12-11T20:49:39,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244 in 267 msec 2024-12-11T20:49:39,556 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:49:39,557 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950179556"}]},"ts":"1733950179556"} 2024-12-11T20:49:39,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-11T20:49:39,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, ASSIGN in 441 msec 2024-12-11T20:49:39,565 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T20:49:39,568 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:49:39,568 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-11T20:49:39,573 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T20:49:39,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:39,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:39,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:39,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:39,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:39,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:39,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:39,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:39,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 735 msec 2024-12-11T20:49:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T20:49:40,044 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T20:49:40,044 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-11T20:49:40,045 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:40,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-11T20:49:40,058 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:40,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
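[Editor's note, not part of the captured log] For orientation while reading the CreateTableProcedure entries above (pid=63 and its ASSIGN children pid=64/65): the table 'testtb-testExportWithResetTtl' is created with a single column family 'cf' and one split key '1', which is why exactly two regions (start keys '' and '1') are added to hbase:meta and assigned. A minimal client-side sketch of an equivalent request follows. It is an assumption-laden illustration, not code from the test: the class name is invented, and it presumes an HBaseConfiguration on the classpath that already points at the running cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical sketch; class/file name is illustrative only.
    public class CreateExportTtlTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // One split key ("1") => two regions, matching the two TransitRegionStateProcedures above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
          // Rough analogue of the log's "All regions for table ... assigned" check.
          System.out.println("regions online: " + admin.getRegions(tn).size()); // expected: 2
        }
      }
    }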
2024-12-11T20:49:40,058 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:40,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T20:49:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950180065 (current time:1733950180065). 2024-12-11T20:49:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:49:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T20:49:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:49:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ecf631a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:40,084 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:40,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:40,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:40,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@275b4c0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:40,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:40,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,095 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:51856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:40,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a159965, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:40,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:40,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:40,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:49:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,106 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:49:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecccd86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:40,119 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:40,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:40,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:40,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff6f37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:40,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:40,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,127 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:40,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5afdd5e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:40,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:40,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:49:40,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:40,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:40,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:49:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T20:49:40,142 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:49:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T20:49:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T20:49:40,147 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:49:40,149 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:49:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T20:49:40,153 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:49:40,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741961_1137 (size=161) 2024-12-11T20:49:40,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741961_1137 (size=161) 2024-12-11T20:49:40,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741961_1137 (size=161) 2024-12-11T20:49:40,224 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:49:40,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989}] 2024-12-11T20:49:40,225 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:40,228 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:40,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T20:49:40,351 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000003/launch_container.sh] 2024-12-11T20:49:40,351 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000003/container_tokens] 2024-12-11T20:49:40,352 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000003/sysfs] 2024-12-11T20:49:40,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-11T20:49:40,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-11T20:49:40,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 6d795aa0ba7ea8a1773cf88e970f7989: 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. for emptySnaptb0-testExportWithResetTtl completed. 
2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for d1b71d1f925f71479f96b8b2219aa395: 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:40,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:49:40,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741962_1138 (size=68) 2024-12-11T20:49:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741962_1138 (size=68) 2024-12-11T20:49:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741963_1139 (size=68) 2024-12-11T20:49:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741963_1139 (size=68) 2024-12-11T20:49:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741963_1139 (size=68) 2024-12-11T20:49:40,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741962_1138 (size=68) 2024-12-11T20:49:40,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 
2024-12-11T20:49:40,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:40,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-11T20:49:40,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-11T20:49:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-11T20:49:40,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:40,407 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:40,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-11T20:49:40,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:40,409 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:40,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 in 185 msec 2024-12-11T20:49:40,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-11T20:49:40,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 in 186 msec 2024-12-11T20:49:40,412 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:49:40,413 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:49:40,413 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:49:40,413 DEBUG [PEWorker-5 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-11T20:49:40,414 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-11T20:49:40,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T20:49:40,546 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-11T20:49:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741964_1140 (size=543) 2024-12-11T20:49:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741964_1140 (size=543) 2024-12-11T20:49:40,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741964_1140 (size=543) 2024-12-11T20:49:40,566 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:49:40,580 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:49:40,582 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-11T20:49:40,584 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:49:40,584 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T20:49:40,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 441 msec 2024-12-11T20:49:40,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T20:49:40,773 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table 
Name: default:testtb-testExportWithResetTtl completed 2024-12-11T20:49:40,784 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0d27f3f8e1efbe8a1d3ad262d584d5a4e', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:49:40,787 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='1c3b33c572c6b9eba33b7e84afb075c3d', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:49:40,809 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:49:40,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:49:40,819 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:40,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-11T20:49:40,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:40,823 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:40,826 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:40,835 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:40,845 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:40,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T20:49:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950180849 (current time:1733950180849). 
2024-12-11T20:49:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:49:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T20:49:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:49:40,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46b3397, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:40,852 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:40,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:40,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:40,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7330faf0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:40,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:40,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,855 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:40,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ee18763, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:40,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:40,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,860 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:40,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:49:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18a554bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:40,865 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:49:40,865 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:40,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:40,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:40,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e4e5e87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:40,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:40,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,867 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:40,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2684ddf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:40,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:40,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:40,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,873 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:40,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:40,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:40,877 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-11T20:49:40,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:49:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:40,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:40,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T20:49:40,880 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:49:40,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:49:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T20:49:40,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T20:49:40,884 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:49:40,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T20:49:40,886 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:49:40,890 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:49:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741965_1141 (size=156) 2024-12-11T20:49:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741965_1141 (size=156) 2024-12-11T20:49:40,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741965_1141 (size=156) 2024-12-11T20:49:40,921 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:49:40,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989}] 2024-12-11T20:49:40,923 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:40,923 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:40,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T20:49:41,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-11T20:49:41,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:49:41,080 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 6d795aa0ba7ea8a1773cf88e970f7989 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T20:49:41,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-11T20:49:41,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:49:41,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing d1b71d1f925f71479f96b8b2219aa395 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T20:49:41,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/.tmp/cf/629cc8c4610a4d0ea754dab20c44e769 is 71, key is 0543f84c4a8cc01aab6f54a97ef928b4/cf:q/1733950180818/Put/seqid=0 2024-12-11T20:49:41,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/.tmp/cf/734fd0a4e15d49129bffbd9d6a27a8ee is 71, key is 10d43a1f9b6ab718b489c7e15827c867/cf:q/1733950180808/Put/seqid=0 2024-12-11T20:49:41,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741966_1142 (size=5216) 2024-12-11T20:49:41,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741966_1142 (size=5216) 2024-12-11T20:49:41,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741967_1143 (size=8392) 2024-12-11T20:49:41,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741967_1143 (size=8392) 2024-12-11T20:49:41,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741967_1143 (size=8392) 
2024-12-11T20:49:41,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741966_1142 (size=5216) 2024-12-11T20:49:41,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/.tmp/cf/734fd0a4e15d49129bffbd9d6a27a8ee 2024-12-11T20:49:41,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/.tmp/cf/629cc8c4610a4d0ea754dab20c44e769 2024-12-11T20:49:41,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/.tmp/cf/734fd0a4e15d49129bffbd9d6a27a8ee as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee 2024-12-11T20:49:41,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/.tmp/cf/629cc8c4610a4d0ea754dab20c44e769 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769 2024-12-11T20:49:41,163 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee, entries=48, sequenceid=6, filesize=8.2 K 2024-12-11T20:49:41,165 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6d795aa0ba7ea8a1773cf88e970f7989 in 86ms, sequenceid=6, compaction requested=false 2024-12-11T20:49:41,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 6d795aa0ba7ea8a1773cf88e970f7989: 2024-12-11T20:49:41,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. for snaptb0-testExportWithResetTtl completed. 
2024-12-11T20:49:41,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:41,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee] hfiles 2024-12-11T20:49:41,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769, entries=2, sequenceid=6, filesize=5.1 K 2024-12-11T20:49:41,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for d1b71d1f925f71479f96b8b2219aa395 in 85ms, sequenceid=6, compaction requested=false 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for d1b71d1f925f71479f96b8b2219aa395: 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. for snaptb0-testExportWithResetTtl completed. 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769] hfiles 2024-12-11T20:49:41,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769 for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741968_1144 (size=107) 2024-12-11T20:49:41,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741968_1144 (size=107) 2024-12-11T20:49:41,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741968_1144 (size=107) 2024-12-11T20:49:41,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 
2024-12-11T20:49:41,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-11T20:49:41,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-11T20:49:41,188 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:41,188 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:49:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741969_1145 (size=107) 2024-12-11T20:49:41,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741969_1145 (size=107) 2024-12-11T20:49:41,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741969_1145 (size=107) 2024-12-11T20:49:41,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d1b71d1f925f71479f96b8b2219aa395 in 271 msec 2024-12-11T20:49:41,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T20:49:41,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T20:49:41,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 
2024-12-11T20:49:41,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-11T20:49:41,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-11T20:49:41,592 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:41,592 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:49:41,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-11T20:49:41,594 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:49:41,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989 in 672 msec 2024-12-11T20:49:41,595 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:49:41,596 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:49:41,596 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,596 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741970_1146 (size=621) 2024-12-11T20:49:41,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741970_1146 (size=621) 2024-12-11T20:49:41,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741970_1146 (size=621) 2024-12-11T20:49:41,607 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:49:41,613 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:49:41,613 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-11T20:49:41,615 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:49:41,615 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T20:49:41,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 734 msec 2024-12-11T20:49:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T20:49:42,023 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T20:49:42,024 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:49:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-11T20:49:42,027 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:49:42,027 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:42,027 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-11T20:49:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=74 2024-12-11T20:49:42,028 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:49:42,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741971_1147 (size=397) 2024-12-11T20:49:42,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741971_1147 (size=397) 2024-12-11T20:49:42,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741971_1147 (size=397) 2024-12-11T20:49:42,040 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2326ced8b9e7212a1bb352804b24ffc8, NAME => 'testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:42,041 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 308600b7dd70ac2d1d6971bff5e26a42, NAME => 'testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741973_1149 (size=58) 2024-12-11T20:49:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741972_1148 (size=58) 2024-12-11T20:49:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741973_1149 (size=58) 2024-12-11T20:49:42,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741973_1149 (size=58) 2024-12-11T20:49:42,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741972_1148 (size=58) 2024-12-11T20:49:42,048 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:42,048 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 2326ced8b9e7212a1bb352804b24ffc8, disabling compactions & flushes 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 308600b7dd70ac2d1d6971bff5e26a42, disabling compactions & flushes 2024-12-11T20:49:42,049 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,049 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. after waiting 0 ms 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. after waiting 0 ms 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741972_1148 (size=58) 2024-12-11T20:49:42,049 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,049 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 
2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2326ced8b9e7212a1bb352804b24ffc8: Waiting for close lock at 1733950182049Disabling compacts and flushes for region at 1733950182049Disabling writes for close at 1733950182049Writing region close event to WAL at 1733950182049Closed at 1733950182049 2024-12-11T20:49:42,049 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 308600b7dd70ac2d1d6971bff5e26a42: Waiting for close lock at 1733950182049Disabling compacts and flushes for region at 1733950182049Disabling writes for close at 1733950182049Writing region close event to WAL at 1733950182049Closed at 1733950182049 2024-12-11T20:49:42,050 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:49:42,051 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733950182050"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950182050"}]},"ts":"1733950182050"} 2024-12-11T20:49:42,051 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733950182050"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950182050"}]},"ts":"1733950182050"} 2024-12-11T20:49:42,057 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-11T20:49:42,058 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:49:42,058 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950182058"}]},"ts":"1733950182058"} 2024-12-11T20:49:42,060 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T20:49:42,060 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:49:42,061 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:49:42,062 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:49:42,062 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:49:42,062 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:49:42,062 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:49:42,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, ASSIGN}] 2024-12-11T20:49:42,065 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, ASSIGN 2024-12-11T20:49:42,066 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:49:42,067 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, ASSIGN 2024-12-11T20:49:42,067 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:49:42,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0002_000001 (auth:SIMPLE) from 127.0.0.1:53786 2024-12-11T20:49:42,113 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000001/launch_container.sh] 2024-12-11T20:49:42,113 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000001/container_tokens] 2024-12-11T20:49:42,113 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0002/container_1733950008312_0002_01_000001/sysfs] 2024-12-11T20:49:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T20:49:42,216 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:49:42,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=2326ced8b9e7212a1bb352804b24ffc8, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:49:42,217 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=308600b7dd70ac2d1d6971bff5e26a42, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:49:42,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, ASSIGN because future has completed 2024-12-11T20:49:42,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:49:42,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, ASSIGN because future has completed 2024-12-11T20:49:42,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:49:42,226 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6d795aa0ba7ea8a1773cf88e970f7989 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:49:42,226 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d1b71d1f925f71479f96b8b2219aa395 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:49:42,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T20:49:42,375 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,375 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 
2024-12-11T20:49:42,375 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 308600b7dd70ac2d1d6971bff5e26a42, NAME => 'testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:49:42,375 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 2326ced8b9e7212a1bb352804b24ffc8, NAME => 'testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:49:42,375 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. service=AccessControlService 2024-12-11T20:49:42,376 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. service=AccessControlService 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,376 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
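[Editor's note] The two regions being opened above (STARTKEY ''..'1' and '1'..'') come from a table pre-split at a single key. A minimal client-side sketch of creating such a table is shown below; it is illustrative only — the table and family names are taken from the log, but the configuration, connection handling, and class name are assumptions, not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportWithResetTtl");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // One split key produces exactly the two regions seen in the log:
      // ['', '1') and ['1', '').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);
    }
  }
}
```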
2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,376 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,378 INFO [StoreOpener-308600b7dd70ac2d1d6971bff5e26a42-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,378 INFO [StoreOpener-2326ced8b9e7212a1bb352804b24ffc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,379 INFO [StoreOpener-308600b7dd70ac2d1d6971bff5e26a42-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 308600b7dd70ac2d1d6971bff5e26a42 columnFamilyName cf 2024-12-11T20:49:42,379 INFO [StoreOpener-2326ced8b9e7212a1bb352804b24ffc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2326ced8b9e7212a1bb352804b24ffc8 columnFamilyName cf 2024-12-11T20:49:42,379 DEBUG [StoreOpener-308600b7dd70ac2d1d6971bff5e26a42-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:42,379 DEBUG [StoreOpener-2326ced8b9e7212a1bb352804b24ffc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:49:42,380 INFO [StoreOpener-308600b7dd70ac2d1d6971bff5e26a42-1 {}] regionserver.HStore(327): Store=308600b7dd70ac2d1d6971bff5e26a42/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:42,380 INFO [StoreOpener-2326ced8b9e7212a1bb352804b24ffc8-1 {}] regionserver.HStore(327): Store=2326ced8b9e7212a1bb352804b24ffc8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:49:42,380 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,380 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,381 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,381 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,381 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,382 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,382 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,382 DEBUG 
[RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,382 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,382 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,384 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,384 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,389 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:42,390 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 308600b7dd70ac2d1d6971bff5e26a42; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68148500, jitterRate=0.015491783618927002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:42,390 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,390 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:49:42,391 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 308600b7dd70ac2d1d6971bff5e26a42: Running coprocessor pre-open hook at 1733950182376Writing region info on filesystem at 1733950182376Initializing all the Stores at 1733950182377 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950182377Cleaning up temporary data from old regions at 1733950182382 (+5 ms)Running coprocessor post-open hooks at 1733950182390 (+8 ms)Region opened successfully at 1733950182390 2024-12-11T20:49:42,391 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 2326ced8b9e7212a1bb352804b24ffc8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60411587, jitterRate=-0.09979720413684845}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:49:42,391 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,391 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42., pid=78, masterSystemTime=1733950182372 2024-12-11T20:49:42,391 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 2326ced8b9e7212a1bb352804b24ffc8: Running coprocessor pre-open hook at 1733950182376Writing region info on filesystem at 1733950182376Initializing all the Stores at 1733950182377 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950182377Cleaning up temporary data from old regions at 1733950182382 (+5 ms)Running coprocessor post-open hooks at 1733950182391 (+9 ms)Region opened successfully at 1733950182391 2024-12-11T20:49:42,393 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8., pid=77, masterSystemTime=1733950182371 2024-12-11T20:49:42,395 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,395 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,395 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=308600b7dd70ac2d1d6971bff5e26a42, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:49:42,397 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,397 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 
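[Editor's note] The "Region open journal" entries above echo the column family schema the regions were opened with (VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB, TTL=FOREVER, no compression or data block encoding). The sketch below reconstructs an equivalent descriptor from those logged values; it is a reading of the log, not the test's setup code, and the class/method wrapper is assumed.

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CfSchemaFromJournal {
  static ColumnFamilyDescriptor cf() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)                            // VERSIONS => '1'
        .setMinVersions(0)                            // MIN_VERSIONS => '0'
        .setKeepDeletedCells(KeepDeletedCells.FALSE)  // KEEP_DELETED_CELLS => 'FALSE'
        .setDataBlockEncoding(DataBlockEncoding.NONE) // DATA_BLOCK_ENCODING => 'NONE'
        .setCompressionType(Compression.Algorithm.NONE)
        .setBloomFilterType(BloomType.ROW)            // BLOOMFILTER => 'ROW'
        .setBlocksize(64 * 1024)                      // BLOCKSIZE => '65536 B (64KB)'
        .setTimeToLive(HConstants.FOREVER)            // TTL => 'FOREVER'
        .setScope(0)                                  // REPLICATION_SCOPE => '0'
        .setInMemory(false)
        .setBlockCacheEnabled(true)
        .build();
  }
}
```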
2024-12-11T20:49:42,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:49:42,399 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=2326ced8b9e7212a1bb352804b24ffc8, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:49:42,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:49:42,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-11T20:49:42,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244 in 180 msec 2024-12-11T20:49:42,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-11T20:49:42,405 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, ASSIGN in 341 msec 2024-12-11T20:49:42,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112 in 183 msec 2024-12-11T20:49:42,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-12-11T20:49:42,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, ASSIGN in 343 msec 2024-12-11T20:49:42,409 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:49:42,409 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950182409"}]},"ts":"1733950182409"} 2024-12-11T20:49:42,411 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T20:49:42,412 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:49:42,412 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-11T20:49:42,415 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T20:49:42,472 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:42,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:42,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:42,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:49:42,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 491 msec 2024-12-11T20:49:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=74 2024-12-11T20:49:42,653 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-11T20:49:42,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-11T20:49:42,653 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:42,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-11T20:49:42,656 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:42,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-12-11T20:49:42,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:42,663 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0f0566ca0226c56fc1a83d221847c93a0', locateType=CURRENT is [region=testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:42,664 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='10e13465207afefe9dfedcaea1476297a', locateType=CURRENT is [region=testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:49:42,665 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2316de2603e67cddcf59eeddd6a3ce827', locateType=CURRENT is [region=testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:49:42,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3bfe7c9bf058db07c202d6aaf3be32266', locateType=CURRENT is [region=testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:49:42,666 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='46a5ae56ca16750e0bf5368194348914c', locateType=CURRENT is [region=testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:49:42,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:49:42,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. with WAL disabled. Data may be lost in the event of a crash. 
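[Editor's note] The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled") correspond to puts issued with SKIP_WAL durability. A minimal client-side sketch is below; the row key and qualifier ("cf:q") are taken from the locator and flush entries in this log, while the cell value and the helper method are assumed for illustration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  static void writeRow(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      // Row key as seen in the AsyncNonMetaRegionLocator entry above.
      Put put = new Put(Bytes.toBytes("0f0566ca0226c56fc1a83d221847c93a0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
          .setDurability(Durability.SKIP_WAL);  // produces the "WAL disabled" warning
      table.put(put);
    }
  }
}
```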
2024-12-11T20:49:42,677 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:42,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-11T20:49:42,679 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:49:42,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:49:42,681 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:42,685 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:42,691 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T20:49:42,693 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T20:49:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950182693 (current time:1733950182693). 
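[Editor's note] The snapshot request logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) maps to a FLUSH-type snapshot issued through the Admin API. The sketch below shows the simple overload; the ttl=100000 seen in the request is carried as a snapshot property and the exact overload the test uses is not visible in the log, so treat the TTL handling as unspecified here.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class RequestSnapshot {
  static void snapshot(Admin admin) throws Exception {
    // FLUSH-type snapshot: memstores are flushed before files are referenced,
    // as the SnapshotRegionCallable entries later in this log show.
    admin.snapshot("snaptb-testExportWithResetTtl",
        TableName.valueOf("testExportWithResetTtl"), SnapshotType.FLUSH);
  }
}
```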
2024-12-11T20:49:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T20:49:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:49:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2081d1c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:42,695 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:42,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:42,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:42,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68977187, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:42,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:42,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,697 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@582fa715, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:42,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:42,698 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:42,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:42,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:49:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,701 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:49:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@320cef1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:49:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:49:42,703 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48179cb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:49:42,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,704 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35948, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:49:42,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@107bc3bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:49:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:49:42,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:49:42,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:42,707 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
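[Editor's note] The connection setup/teardown churn above is the master opening short-lived connections to read the table's ACLs while validating the snapshot description (see the PermissionStorage.getTablePermissions frames in the stack trace that follows). The "jenkins: RWXCA" entry it reads back would have been written by a grant roughly like the sketch below; the user name matches the log, but the connection handling and wrapper method are assumptions.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePerms {
  static void grantAll(Connection conn) throws Throwable {
    // null family/qualifier grants at table scope; the five actions spell RWXCA.
    AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```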
2024-12-11T20:49:42,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:49:42,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:49:42,709 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33828, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:49:42,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:49:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:49:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,712 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:49:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:49:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T20:49:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:49:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T20:49:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T20:49:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T20:49:42,716 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:49:42,717 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:49:42,720 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:49:42,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741974_1150 (size=143) 2024-12-11T20:49:42,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741974_1150 (size=143) 2024-12-11T20:49:42,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741974_1150 (size=143) 2024-12-11T20:49:42,737 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-11T20:49:42,738 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42}] 2024-12-11T20:49:42,739 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,739 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T20:49:42,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-11T20:49:42,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-11T20:49:42,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:49:42,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 
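[Editor's note] Because the snapshot is of type FLUSH, each SnapshotRegionCallable dispatched above flushes its region's memstore before referencing store files (the "Flushing ..." and "Flushed memstore ..." entries that follow). The same memstore flush can also be requested explicitly from a client; this one-line sketch is for reference only and is not part of the snapshot path shown in the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class ExplicitFlush {
  static void flush(Admin admin) throws Exception {
    // Flushes all regions of the table, producing HFiles like the ones flushed below.
    admin.flush(TableName.valueOf("testExportWithResetTtl"));
  }
}
```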
2024-12-11T20:49:42,891 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 308600b7dd70ac2d1d6971bff5e26a42 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T20:49:42,891 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 2326ced8b9e7212a1bb352804b24ffc8 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T20:49:42,906 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/.tmp/cf/18ce5862f5b7445f943c8abc6e589814 is 71, key is 021a112ce7d439a6507effda5c3a0989/cf:q/1733950182672/Put/seqid=0 2024-12-11T20:49:42,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741975_1151 (size=5356) 2024-12-11T20:49:42,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741975_1151 (size=5356) 2024-12-11T20:49:42,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741975_1151 (size=5356) 2024-12-11T20:49:42,911 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/.tmp/cf/18ce5862f5b7445f943c8abc6e589814 2024-12-11T20:49:42,912 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/.tmp/cf/d7ba4274303246a9afc1bfeda22c41c1 is 71, key is 1084969c828ae7bf99c749f6a8af090e/cf:q/1733950182675/Put/seqid=0 2024-12-11T20:49:42,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741976_1152 (size=8256) 2024-12-11T20:49:42,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741976_1152 (size=8256) 2024-12-11T20:49:42,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741976_1152 (size=8256) 2024-12-11T20:49:42,918 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/.tmp/cf/18ce5862f5b7445f943c8abc6e589814 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814 2024-12-11T20:49:42,918 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/.tmp/cf/d7ba4274303246a9afc1bfeda22c41c1 2024-12-11T20:49:42,924 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814, entries=4, sequenceid=5, filesize=5.2 K 2024-12-11T20:49:42,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/.tmp/cf/d7ba4274303246a9afc1bfeda22c41c1 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1 2024-12-11T20:49:42,925 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2326ced8b9e7212a1bb352804b24ffc8 in 34ms, sequenceid=5, compaction requested=false 2024-12-11T20:49:42,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 2326ced8b9e7212a1bb352804b24ffc8: 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. for snaptb-testExportWithResetTtl completed. 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814] hfiles 2024-12-11T20:49:42,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814 for snapshot=snaptb-testExportWithResetTtl 2024-12-11T20:49:42,931 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1, entries=46, sequenceid=5, filesize=8.1 K 2024-12-11T20:49:42,932 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 308600b7dd70ac2d1d6971bff5e26a42 in 41ms, sequenceid=5, compaction requested=false 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 308600b7dd70ac2d1d6971bff5e26a42: 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. for snaptb-testExportWithResetTtl completed. 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1] hfiles 2024-12-11T20:49:42,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1 for snapshot=snaptb-testExportWithResetTtl 2024-12-11T20:49:42,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741977_1153 (size=100) 2024-12-11T20:49:42,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741977_1153 (size=100) 2024-12-11T20:49:42,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741977_1153 (size=100) 2024-12-11T20:49:42,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 
2024-12-11T20:49:42,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-11T20:49:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-11T20:49:42,957 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,957 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:49:42,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42 in 220 msec 2024-12-11T20:49:42,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741978_1154 (size=100) 2024-12-11T20:49:42,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741978_1154 (size=100) 2024-12-11T20:49:42,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741978_1154 (size=100) 2024-12-11T20:49:42,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 
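The pid=80/81 entries above record each region being flushed and its hfile references being written into the snapshot manifest before the master marks both SnapshotRegionProcedure subprocedures SUCCESS. For context, a minimal sketch of the client call that typically drives this kind of FLUSH snapshot is shown below; the connection setup is an assumption of the sketch, and the ttl=100000 snapshot property visible in the log is omitted for brevity.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot of an enabled table: each region is flushed,
      // then its region-info and hfile references are written into the
      // snapshot manifest, matching the SnapshotRegionProcedure entries above.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
    }
  }
}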
2024-12-11T20:49:42,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-11T20:49:42,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-11T20:49:42,968 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,969 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:49:42,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-11T20:49:42,972 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:49:42,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8 in 232 msec 2024-12-11T20:49:42,973 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:49:42,974 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:49:42,974 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-11T20:49:42,974 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T20:49:42,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741979_1155 (size=600) 2024-12-11T20:49:42,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741979_1155 (size=600) 2024-12-11T20:49:42,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741979_1155 (size=600) 2024-12-11T20:49:43,005 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:49:43,012 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:49:43,013 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T20:49:43,014 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:49:43,015 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T20:49:43,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 302 msec 2024-12-11T20:49:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T20:49:43,033 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-11T20:49:43,041 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041 2024-12-11T20:49:43,042 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:43,069 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:49:43,069 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T20:49:43,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
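The entries above show ExportSnapshot resolving its inputFs/inputRoot and outputRoot and verifying the source snapshot before copying the manifest. A minimal sketch of how such an export is commonly launched follows; driving it through ToolRunner is an assumption of this sketch, -snapshot and -copy-to are the standard options, and the destination is the export path logged above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the referenced hfiles to the target
    // filesystem via a MapReduce job, as seen in the subsequent log entries.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to",
        "hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041"
    });
    System.exit(rc);
  }
}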
2024-12-11T20:49:43,075 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T20:49:43,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741980_1156 (size=143) 2024-12-11T20:49:43,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741980_1156 (size=143) 2024-12-11T20:49:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741980_1156 (size=143) 2024-12-11T20:49:43,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741981_1157 (size=600) 2024-12-11T20:49:43,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741981_1157 (size=600) 2024-12-11T20:49:43,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741981_1157 (size=600) 2024-12-11T20:49:43,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741982_1158 (size=141) 2024-12-11T20:49:43,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741982_1158 (size=141) 2024-12-11T20:49:43,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741982_1158 (size=141) 2024-12-11T20:49:43,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:43,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:43,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:43,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:49:43,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T20:49:43,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T20:49:43,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): 
Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-11T20:49:43,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T20:49:43,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T20:49:44,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-15907756157560042673.jar 2024-12-11T20:49:44,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-16035490427696462876.jar 2024-12-11T20:49:44,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:49:44,180 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:49:44,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:49:44,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:49:44,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:49:44,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:49:44,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:49:44,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:44,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:44,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:44,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:44,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:49:44,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:44,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:49:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741983_1159 (size=131440) 2024-12-11T20:49:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741983_1159 (size=131440) 2024-12-11T20:49:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741983_1159 (size=131440) 2024-12-11T20:49:44,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741984_1160 (size=4188619) 2024-12-11T20:49:44,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741984_1160 (size=4188619) 2024-12-11T20:49:44,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741984_1160 (size=4188619) 2024-12-11T20:49:44,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741985_1161 (size=1323991) 2024-12-11T20:49:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741985_1161 (size=1323991) 2024-12-11T20:49:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added 
to blk_1073741985_1161 (size=1323991) 2024-12-11T20:49:44,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741986_1162 (size=903544) 2024-12-11T20:49:44,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741986_1162 (size=903544) 2024-12-11T20:49:44,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741986_1162 (size=903544) 2024-12-11T20:49:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741987_1163 (size=8360360) 2024-12-11T20:49:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741987_1163 (size=8360360) 2024-12-11T20:49:44,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741987_1163 (size=8360360) 2024-12-11T20:49:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741988_1164 (size=6426228) 2024-12-11T20:49:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741988_1164 (size=6426228) 2024-12-11T20:49:44,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741988_1164 (size=6426228) 2024-12-11T20:49:44,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741989_1165 (size=1877034) 2024-12-11T20:49:44,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741989_1165 (size=1877034) 2024-12-11T20:49:44,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741989_1165 (size=1877034) 2024-12-11T20:49:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741990_1166 (size=77835) 2024-12-11T20:49:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741990_1166 (size=77835) 2024-12-11T20:49:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741990_1166 (size=77835) 2024-12-11T20:49:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741991_1167 (size=30949) 2024-12-11T20:49:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741991_1167 (size=30949) 2024-12-11T20:49:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741991_1167 (size=30949) 2024-12-11T20:49:44,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741992_1168 (size=1597213) 2024-12-11T20:49:44,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45301 is added to blk_1073741992_1168 (size=1597213) 2024-12-11T20:49:44,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741992_1168 (size=1597213) 2024-12-11T20:49:44,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741993_1169 (size=443172) 2024-12-11T20:49:44,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741993_1169 (size=443172) 2024-12-11T20:49:44,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741993_1169 (size=443172) 2024-12-11T20:49:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741994_1170 (size=4695811) 2024-12-11T20:49:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741994_1170 (size=4695811) 2024-12-11T20:49:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741994_1170 (size=4695811) 2024-12-11T20:49:44,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741995_1171 (size=232957) 2024-12-11T20:49:44,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741995_1171 (size=232957) 2024-12-11T20:49:44,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741995_1171 (size=232957) 2024-12-11T20:49:44,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741996_1172 (size=127628) 2024-12-11T20:49:44,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741996_1172 (size=127628) 2024-12-11T20:49:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741996_1172 (size=127628) 2024-12-11T20:49:44,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741997_1173 (size=20406) 2024-12-11T20:49:44,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741997_1173 (size=20406) 2024-12-11T20:49:44,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741997_1173 (size=20406) 2024-12-11T20:49:44,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741998_1174 (size=5175431) 2024-12-11T20:49:44,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741998_1174 (size=5175431) 2024-12-11T20:49:44,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741998_1174 (size=5175431) 2024-12-11T20:49:44,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46705 is added to blk_1073741999_1175 (size=217634) 2024-12-11T20:49:44,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741999_1175 (size=217634) 2024-12-11T20:49:44,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741999_1175 (size=217634) 2024-12-11T20:49:44,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742000_1176 (size=1832290) 2024-12-11T20:49:44,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742000_1176 (size=1832290) 2024-12-11T20:49:44,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742000_1176 (size=1832290) 2024-12-11T20:49:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742001_1177 (size=322274) 2024-12-11T20:49:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742001_1177 (size=322274) 2024-12-11T20:49:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742001_1177 (size=322274) 2024-12-11T20:49:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742002_1178 (size=503880) 2024-12-11T20:49:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742002_1178 (size=503880) 2024-12-11T20:49:44,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742002_1178 (size=503880) 2024-12-11T20:49:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742003_1179 (size=29229) 2024-12-11T20:49:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742003_1179 (size=29229) 2024-12-11T20:49:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742003_1179 (size=29229) 2024-12-11T20:49:44,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742004_1180 (size=24096) 2024-12-11T20:49:44,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742004_1180 (size=24096) 2024-12-11T20:49:44,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742004_1180 (size=24096) 2024-12-11T20:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742005_1181 (size=111872) 2024-12-11T20:49:45,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742005_1181 (size=111872) 2024-12-11T20:49:45,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742005_1181 (size=111872) 2024-12-11T20:49:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742006_1182 (size=45609) 2024-12-11T20:49:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742006_1182 (size=45609) 2024-12-11T20:49:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742006_1182 (size=45609) 2024-12-11T20:49:45,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742007_1183 (size=136454) 2024-12-11T20:49:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742007_1183 (size=136454) 2024-12-11T20:49:45,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742007_1183 (size=136454) 2024-12-11T20:49:45,024 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:49:45,026 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-11T20:49:45,028 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-11T20:49:45,028 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-11T20:49:45,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742008_1184 (size=427) 2024-12-11T20:49:45,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742008_1184 (size=427) 2024-12-11T20:49:45,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742008_1184 (size=427) 2024-12-11T20:49:45,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742009_1185 (size=21) 2024-12-11T20:49:45,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742009_1185 (size=21) 2024-12-11T20:49:45,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742009_1185 (size=21) 2024-12-11T20:49:45,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742010_1186 (size=303998) 2024-12-11T20:49:45,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742010_1186 (size=303998) 2024-12-11T20:49:45,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742010_1186 (size=303998) 2024-12-11T20:49:45,084 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:49:45,084 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:49:45,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:53802 2024-12-11T20:49:49,479 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:49:51,705 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:44506 2024-12-11T20:49:52,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742011_1187 (size=349696) 2024-12-11T20:49:52,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742011_1187 (size=349696) 2024-12-11T20:49:52,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742011_1187 (size=349696) 2024-12-11T20:49:55,491 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:49202 2024-12-11T20:49:55,504 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:56108 2024-12-11T20:50:01,783 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T20:50:01,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742012_1188 (size=8256) 2024-12-11T20:50:01,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742012_1188 (size=8256) 2024-12-11T20:50:01,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742012_1188 (size=8256) 2024-12-11T20:50:02,135 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000002/launch_container.sh] 2024-12-11T20:50:02,135 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000002/container_tokens] 2024-12-11T20:50:02,135 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000002/sysfs] 2024-12-11T20:50:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742014_1190 (size=5356) 2024-12-11T20:50:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742014_1190 (size=5356) 2024-12-11T20:50:03,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742014_1190 (size=5356) 2024-12-11T20:50:03,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742013_1189 (size=22124) 2024-12-11T20:50:03,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742013_1189 (size=22124) 2024-12-11T20:50:03,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742013_1189 (size=22124) 2024-12-11T20:50:03,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742015_1191 (size=462) 2024-12-11T20:50:03,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742015_1191 (size=462) 2024-12-11T20:50:03,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742015_1191 (size=462) 2024-12-11T20:50:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742016_1192 (size=22124) 
2024-12-11T20:50:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742016_1192 (size=22124) 2024-12-11T20:50:03,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742016_1192 (size=22124) 2024-12-11T20:50:03,349 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000003/launch_container.sh] 2024-12-11T20:50:03,349 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000003/container_tokens] 2024-12-11T20:50:03,349 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000003/sysfs] 2024-12-11T20:50:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742017_1193 (size=349696) 2024-12-11T20:50:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742017_1193 (size=349696) 2024-12-11T20:50:03,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742017_1193 (size=349696) 2024-12-11T20:50:03,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:59496 2024-12-11T20:50:05,456 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:50:05,460 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
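At this point the export has been finalized and verified; the entries that follow list the exported snapshot files and then tear the source table down via DisableTableProcedure, DeleteTableProcedure and hfile archiving. A minimal sketch of the equivalent client-side cleanup, assuming the same table and snapshot names (the deleteSnapshot call is included only for completeness and is not shown in these log lines):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Disable first: regions are closed (CloseRegionProcedure), then the
      // table state flips to DISABLED in hbase:meta, as in the log below.
      admin.disableTable(table);
      // Delete removes the table layout from the filesystem and archives
      // the remaining hfiles under the archive directory.
      admin.deleteTable(table);
      // Snapshot deletion is a separate call (assumed here, not logged above).
      admin.deleteSnapshot("snaptb-testExportWithResetTtl");
    }
  }
}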
2024-12-11T20:50:05,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-11T20:50:05,481 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:50:05,483 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:50:05,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T20:50:05,484 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T20:50:05,485 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T20:50:05,485 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T20:50:05,485 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T20:50:05,485 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950183041/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T20:50:05,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-11T20:50:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T20:50:05,511 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950205511"}]},"ts":"1733950205511"} 2024-12-11T20:50:05,516 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-11T20:50:05,516 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-11T20:50:05,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-11T20:50:05,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, UNASSIGN}] 2024-12-11T20:50:05,523 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, UNASSIGN 2024-12-11T20:50:05,524 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, UNASSIGN 2024-12-11T20:50:05,525 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=308600b7dd70ac2d1d6971bff5e26a42, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:05,525 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=2326ced8b9e7212a1bb352804b24ffc8, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:50:05,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, UNASSIGN because future has completed 2024-12-11T20:50:05,530 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:05,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:50:05,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, UNASSIGN because future has completed 2024-12-11T20:50:05,533 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:05,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:50:05,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T20:50:05,686 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:50:05,686 DEBUG 
[RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:05,686 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 2326ced8b9e7212a1bb352804b24ffc8, disabling compactions & flushes 2024-12-11T20:50:05,686 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:50:05,686 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:50:05,686 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. after waiting 0 ms 2024-12-11T20:50:05,686 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:50:05,688 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:50:05,688 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:05,688 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 308600b7dd70ac2d1d6971bff5e26a42, disabling compactions & flushes 2024-12-11T20:50:05,688 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:50:05,688 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 2024-12-11T20:50:05,688 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. after waiting 0 ms 2024-12-11T20:50:05,688 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 
2024-12-11T20:50:05,777 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:50:05,778 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:05,779 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8. 2024-12-11T20:50:05,779 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 2326ced8b9e7212a1bb352804b24ffc8: Waiting for close lock at 1733950205686Running coprocessor pre-close hooks at 1733950205686Disabling compacts and flushes for region at 1733950205686Disabling writes for close at 1733950205686Writing region close event to WAL at 1733950205715 (+29 ms)Running coprocessor post-close hooks at 1733950205778 (+63 ms)Closed at 1733950205779 (+1 ms) 2024-12-11T20:50:05,782 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:50:05,782 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=2326ced8b9e7212a1bb352804b24ffc8, regionState=CLOSED 2024-12-11T20:50:05,784 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:50:05,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:50:05,787 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:05,787 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42. 
2024-12-11T20:50:05,787 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 308600b7dd70ac2d1d6971bff5e26a42: Waiting for close lock at 1733950205688Running coprocessor pre-close hooks at 1733950205688Disabling compacts and flushes for region at 1733950205688Disabling writes for close at 1733950205688Writing region close event to WAL at 1733950205736 (+48 ms)Running coprocessor post-close hooks at 1733950205787 (+51 ms)Closed at 1733950205787 2024-12-11T20:50:05,790 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:50:05,791 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=308600b7dd70ac2d1d6971bff5e26a42, regionState=CLOSED 2024-12-11T20:50:05,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-11T20:50:05,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 2326ced8b9e7212a1bb352804b24ffc8, server=5be53b084ac7,39865,1733949995112 in 258 msec 2024-12-11T20:50:05,795 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:50:05,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2326ced8b9e7212a1bb352804b24ffc8, UNASSIGN in 273 msec 2024-12-11T20:50:05,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-11T20:50:05,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 308600b7dd70ac2d1d6971bff5e26a42, server=5be53b084ac7,40781,1733949995244 in 264 msec 2024-12-11T20:50:05,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-11T20:50:05,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=308600b7dd70ac2d1d6971bff5e26a42, UNASSIGN in 279 msec 2024-12-11T20:50:05,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-11T20:50:05,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 291 msec 2024-12-11T20:50:05,819 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950205818"}]},"ts":"1733950205818"} 2024-12-11T20:50:05,822 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T20:50:05,822 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-11T20:50:05,831 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 319 msec 2024-12-11T20:50:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T20:50:05,834 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-11T20:50:05,834 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-11T20:50:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,837 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-11T20:50:05,840 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-11T20:50:05,851 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:50:05,854 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/recovered.edits] 2024-12-11T20:50:05,861 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/cf/18ce5862f5b7445f943c8abc6e589814 2024-12-11T20:50:05,867 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:50:05,875 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/recovered.edits/8.seqid to 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8/recovered.edits/8.seqid 2024-12-11T20:50:05,877 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/recovered.edits] 2024-12-11T20:50:05,878 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/2326ced8b9e7212a1bb352804b24ffc8 2024-12-11T20:50:05,885 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/cf/d7ba4274303246a9afc1bfeda22c41c1 2024-12-11T20:50:05,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,893 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/recovered.edits/8.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42/recovered.edits/8.seqid 2024-12-11T20:50:05,894 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T20:50:05,894 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T20:50:05,894 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportWithResetTtl/308600b7dd70ac2d1d6971bff5e26a42 2024-12-11T20:50:05,894 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-11T20:50:05,894 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,901 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:05,901 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T20:50:05,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:05,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-11T20:50:05,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:05,905 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:05,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:05,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:05,909 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,913 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-11T20:50:05,916 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-11T20:50:05,918 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,918 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 2024-12-11T20:50:05,918 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950205918"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:05,918 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950205918"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:05,921 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:50:05,921 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2326ced8b9e7212a1bb352804b24ffc8, NAME => 'testExportWithResetTtl,,1733950182024.2326ced8b9e7212a1bb352804b24ffc8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 308600b7dd70ac2d1d6971bff5e26a42, NAME => 'testExportWithResetTtl,1,1733950182024.308600b7dd70ac2d1d6971bff5e26a42.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:50:05,922 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 
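Everything from the PermissionStorage cleanup through the META row deletes above is performed by a single DeleteTableProcedure (pid=88). From the client it is one call, sketched below under the assumption that the table has already been disabled as in this log; the class name is illustrative:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("testExportWithResetTtl");
            if (!admin.isTableDisabled(table)) {
                admin.disableTable(table);   // a table must be disabled before it can be deleted
            }
            admin.deleteTable(table);        // the master runs a DeleteTableProcedure, as in pid=88 above
            System.out.println("exists after delete: " + admin.tableExists(table)); // expected: false
        }
    }
}
```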
2024-12-11T20:50:05,923 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950205922"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:05,926 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-11T20:50:05,927 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T20:50:05,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 93 msec 2024-12-11T20:50:06,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-11T20:50:06,015 DEBUG [Time-limited test {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-11T20:50:06,015 INFO [Time-limited test {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-11T20:50:06,018 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-11T20:50:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T20:50:06,029 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950206028"}]},"ts":"1733950206028"} 2024-12-11T20:50:06,032 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-11T20:50:06,032 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-11T20:50:06,033 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-11T20:50:06,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, UNASSIGN}] 2024-12-11T20:50:06,048 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, UNASSIGN 2024-12-11T20:50:06,048 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, 
ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, UNASSIGN 2024-12-11T20:50:06,054 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=d1b71d1f925f71479f96b8b2219aa395, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:06,054 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=6d795aa0ba7ea8a1773cf88e970f7989, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:50:06,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, UNASSIGN because future has completed 2024-12-11T20:50:06,057 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:06,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:50:06,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, UNASSIGN because future has completed 2024-12-11T20:50:06,060 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:06,060 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:50:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T20:50:06,213 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:50:06,213 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:06,213 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing d1b71d1f925f71479f96b8b2219aa395, disabling compactions & flushes 2024-12-11T20:50:06,213 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:50:06,213 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 
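The recurring "Checking to see if procedure is done pid=89" lines are the master answering the client's completion polls while the disable of testtb-testExportWithResetTtl runs. Below is a hedged sketch of an equivalent synchronous wait based on table state; the poll interval and timeout are illustrative values, and the async admin client used by this test actually waits on the procedure result rather than polling state:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForDisableExample {
    // Polls table state until DISABLED or the timeout expires. Illustrative only.
    static boolean waitUntilDisabled(Admin admin, TableName table, long timeoutMs)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (admin.isTableDisabled(table)) {
                return true;
            }
            Thread.sleep(200); // hypothetical poll interval
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
            System.out.println("disabled: " + waitUntilDisabled(admin, table, 60_000));
        }
    }
}
```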
2024-12-11T20:50:06,213 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. after waiting 0 ms 2024-12-11T20:50:06,213 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 2024-12-11T20:50:06,216 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:50:06,217 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:06,217 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 6d795aa0ba7ea8a1773cf88e970f7989, disabling compactions & flushes 2024-12-11T20:50:06,217 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:50:06,217 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:50:06,217 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. after waiting 0 ms 2024-12-11T20:50:06,217 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 2024-12-11T20:50:06,261 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:50:06,262 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:06,262 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395. 
2024-12-11T20:50:06,262 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for d1b71d1f925f71479f96b8b2219aa395: Waiting for close lock at 1733950206213Running coprocessor pre-close hooks at 1733950206213Disabling compacts and flushes for region at 1733950206213Disabling writes for close at 1733950206213Writing region close event to WAL at 1733950206248 (+35 ms)Running coprocessor post-close hooks at 1733950206262 (+14 ms)Closed at 1733950206262 2024-12-11T20:50:06,271 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:50:06,272 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=d1b71d1f925f71479f96b8b2219aa395, regionState=CLOSED 2024-12-11T20:50:06,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:50:06,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=91 2024-12-11T20:50:06,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure d1b71d1f925f71479f96b8b2219aa395, server=5be53b084ac7,40781,1733949995244 in 223 msec 2024-12-11T20:50:06,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d1b71d1f925f71479f96b8b2219aa395, UNASSIGN in 246 msec 2024-12-11T20:50:06,334 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:50:06,335 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:06,335 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989. 
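Each region close also stops the org.apache.hadoop.hbase.security.access.AccessController coprocessor that this secure test cluster loads on every region. A small sketch of how that coprocessor is typically wired up through configuration, assuming the standard coprocessor property names; in the test these settings come from the cluster's hbase-site.xml rather than from code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AclCoprocessorConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable authorization and register the ACL coprocessor on master,
        // region and regionserver scopes, mirroring a secured cluster setup.
        conf.set("hbase.security.authorization", "true");
        conf.set("hbase.coprocessor.master.classes",
                 "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
                 "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
                 "org.apache.hadoop.hbase.security.access.AccessController");
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
    }
}
```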
2024-12-11T20:50:06,335 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 6d795aa0ba7ea8a1773cf88e970f7989: Waiting for close lock at 1733950206217Running coprocessor pre-close hooks at 1733950206217Disabling compacts and flushes for region at 1733950206217Disabling writes for close at 1733950206217Writing region close event to WAL at 1733950206280 (+63 ms)Running coprocessor post-close hooks at 1733950206335 (+55 ms)Closed at 1733950206335 2024-12-11T20:50:06,338 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:50:06,339 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=6d795aa0ba7ea8a1773cf88e970f7989, regionState=CLOSED 2024-12-11T20:50:06,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T20:50:06,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:50:06,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=92 2024-12-11T20:50:06,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 6d795aa0ba7ea8a1773cf88e970f7989, server=5be53b084ac7,40355,1733949994820 in 287 msec 2024-12-11T20:50:06,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-11T20:50:06,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6d795aa0ba7ea8a1773cf88e970f7989, UNASSIGN in 309 msec 2024-12-11T20:50:06,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-11T20:50:06,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 324 msec 2024-12-11T20:50:06,379 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950206379"}]},"ts":"1733950206379"} 2024-12-11T20:50:06,384 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T20:50:06,384 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-11T20:50:06,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 368 msec 2024-12-11T20:50:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T20:50:06,654 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T20:50:06,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-11T20:50:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,664 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-11T20:50:06,667 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,672 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-11T20:50:06,676 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:50:06,676 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:50:06,685 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/recovered.edits] 2024-12-11T20:50:06,685 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/recovered.edits] 2024-12-11T20:50:06,693 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/cf/629cc8c4610a4d0ea754dab20c44e769 2024-12-11T20:50:06,703 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/cf/734fd0a4e15d49129bffbd9d6a27a8ee 2024-12-11T20:50:06,707 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395/recovered.edits/9.seqid 2024-12-11T20:50:06,708 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/d1b71d1f925f71479f96b8b2219aa395 2024-12-11T20:50:06,711 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989/recovered.edits/9.seqid 2024-12-11T20:50:06,712 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithResetTtl/6d795aa0ba7ea8a1773cf88e970f7989 2024-12-11T20:50:06,714 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-11T20:50:06,724 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,728 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-11T20:50:06,731 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 
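The HFileArchiver entries show that the delete path never destroys store files outright: each cf file and recovered.edits file is renamed under archive/data/... in a layout mirroring the original, and only the emptied region directory is deleted afterwards. A simplified FileSystem sketch of that move-then-delete idea, with hypothetical paths; the real HFileArchiver additionally handles name collisions, retries and recursive fan-out:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveExample {
    // Moves one file under an archive root that mirrors its original layout.
    static boolean archiveFile(FileSystem fs, Path dataRoot, Path archiveRoot, Path file)
            throws IOException {
        String relative = file.toString().substring(dataRoot.toString().length() + 1);
        Path target = new Path(archiveRoot, relative);
        fs.mkdirs(target.getParent());          // create the mirrored directory structure
        return fs.rename(file, target);         // rename instead of copy+delete
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical paths modeled on the layout seen in this log.
        Path dataRoot = new Path("/hbase/data");
        Path archiveRoot = new Path("/hbase/archive/data");
        Path hfile = new Path(dataRoot, "default/testtb-testExportWithResetTtl/region-dir/cf/hfile-name");
        System.out.println("archived: " + archiveFile(fs, dataRoot, archiveRoot, hfile));
    }
}
```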
2024-12-11T20:50:06,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,734 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T20:50:06,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T20:50:06,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T20:50:06,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T20:50:06,736 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,736 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-11T20:50:06,736 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950206736"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:06,736 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950206736"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:06,739 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:50:06,739 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d1b71d1f925f71479f96b8b2219aa395, NAME => 'testtb-testExportWithResetTtl,,1733950178895.d1b71d1f925f71479f96b8b2219aa395.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6d795aa0ba7ea8a1773cf88e970f7989, NAME => 'testtb-testExportWithResetTtl,1,1733950178895.6d795aa0ba7ea8a1773cf88e970f7989.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:50:06,739 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 
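The ZKWatcher/ZKPermissionWatcher lines show how the master and every region server learn about the ACL cleanup: each process holds a watch under /hbase/acl, and the NodeDataChanged/NodeDeleted events trigger a refresh of the local permission cache. A bare-bones sketch with the plain ZooKeeper client, assuming the quorum address seen in this log and an illustrative session timeout; HBase's own ZKWatcher re-registers watches and dispatches to listeners, which this sketch does not attempt:

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclNodeWatchExample {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address taken from the log; 30s session timeout is an illustrative value.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57311", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        String aclNode = "/hbase/acl/testtb-testExportWithResetTtl";
        // One-shot watch: a later setData or delete on this znode fires
        // NodeDataChanged / NodeDeleted, much like the ZKWatcher events above.
        zk.exists(aclNode, event ->
            System.out.println("event " + event.getType() + " on " + event.getPath()));

        Thread.sleep(60_000);  // keep the session alive long enough to observe an event
        zk.close();
    }
}
```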
2024-12-11T20:50:06,739 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950206739"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T20:50:06,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:06,743 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-11T20:50:06,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-11T20:50:06,746 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T20:50:06,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 92 msec 2024-12-11T20:50:06,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-11T20:50:06,854 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-11T20:50:06,854 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T20:50:06,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T20:50:06,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-11T20:50:06,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-11T20:50:06,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-11T20:50:06,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T20:50:06,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-11T20:50:06,941 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=797 (was 783) Potentially hanging thread: Thread-2853 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1268779428_1 at /127.0.0.1:40244 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:39601 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:52278 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:51212 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39601 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1268779428_1 at /127.0.0.1:51202 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 114279) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:40280 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=799 (was 796) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=953 (was 869) - SystemLoadAverage LEAK? -, ProcessCount=29 (was 26) - ProcessCount LEAK? -, AvailableMemoryMB=1386 (was 1408) 2024-12-11T20:50:06,941 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-11T20:50:07,001 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=797, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=953, ProcessCount=29, AvailableMemoryMB=1377 2024-12-11T20:50:07,001 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-11T20:50:07,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:50:07,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:07,009 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:50:07,010 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:07,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-11T20:50:07,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T20:50:07,014 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:50:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T20:50:07,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742018_1194 (size=407) 2024-12-11T20:50:07,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to 
blk_1073742018_1194 (size=407) 2024-12-11T20:50:07,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742018_1194 (size=407) 2024-12-11T20:50:07,162 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7a75fd46402d52e1a28d20ff753c678b, NAME => 'testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:07,180 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1946b6c529d923df30924eb1b6d5a6d3, NAME => 'testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T20:50:07,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742019_1195 (size=68) 2024-12-11T20:50:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742019_1195 (size=68) 2024-12-11T20:50:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742019_1195 (size=68) 2024-12-11T20:50:07,390 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:07,390 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 7a75fd46402d52e1a28d20ff753c678b, disabling compactions & flushes 2024-12-11T20:50:07,390 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 
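For reference: the "Potentially hanging thread" stack dumps and the Thread/OpenFileDescriptor/ProcessCount counters reported above come from the test's resource checker, which snapshots live JVM threads before and after each test method and flags the difference. A minimal, hypothetical sketch of how such a report can be produced with plain java.lang APIs (this is not the HBase ResourceChecker itself):

import java.util.Map;

public final class ThreadReport {
  public static void main(String[] args) {
    // Walk every live thread in this JVM and print its name plus current stack,
    // in the same spirit as the "Potentially hanging thread" entries above.
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      System.out.println("Potentially hanging thread: " + e.getKey().getName());
      for (StackTraceElement frame : e.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}
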
2024-12-11T20:50:07,390 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,391 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. after waiting 0 ms 2024-12-11T20:50:07,391 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,391 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,391 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7a75fd46402d52e1a28d20ff753c678b: Waiting for close lock at 1733950207390Disabling compacts and flushes for region at 1733950207390Disabling writes for close at 1733950207391 (+1 ms)Writing region close event to WAL at 1733950207391Closed at 1733950207391 2024-12-11T20:50:07,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742020_1196 (size=68) 2024-12-11T20:50:07,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742020_1196 (size=68) 2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 1946b6c529d923df30924eb1b6d5a6d3, disabling compactions & flushes 2024-12-11T20:50:07,418 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:07,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742020_1196 (size=68) 2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. after waiting 0 ms 2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:07,418 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
2024-12-11T20:50:07,418 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1946b6c529d923df30924eb1b6d5a6d3: Waiting for close lock at 1733950207418Disabling compacts and flushes for region at 1733950207418Disabling writes for close at 1733950207418Writing region close event to WAL at 1733950207418Closed at 1733950207418 2024-12-11T20:50:07,424 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:50:07,424 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733950207424"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950207424"}]},"ts":"1733950207424"} 2024-12-11T20:50:07,425 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733950207424"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950207424"}]},"ts":"1733950207424"} 2024-12-11T20:50:07,430 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:50:07,436 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:50:07,436 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950207436"}]},"ts":"1733950207436"} 2024-12-11T20:50:07,440 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T20:50:07,440 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:50:07,442 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:50:07,442 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:50:07,443 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:50:07,443 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:50:07,443 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:50:07,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, ASSIGN}] 2024-12-11T20:50:07,451 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, ASSIGN 2024-12-11T20:50:07,452 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, ASSIGN 2024-12-11T20:50:07,455 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:50:07,456 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:50:07,606 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
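For reference: the create request logged above ('testtb-testExportFileSystemState' with a single column family 'cf', VERSIONS => '1', BLOCKSIZE => 65536) and the two initial regions with boundaries ('', '1') and ('1', '') correspond to what a client submits through the Admin API. A minimal sketch, assuming a reachable cluster configured via hbase-site.xml; illustrative only, not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(65536)      // BLOCKSIZE => '65536 B (64KB)'
              .build());
      // Split at row key "1" so the table starts with the two regions
      // ('', '1') and ('1', '') created by the procedure above.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}
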
2024-12-11T20:50:07,607 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=7a75fd46402d52e1a28d20ff753c678b, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:07,608 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=1946b6c529d923df30924eb1b6d5a6d3, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:50:07,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, ASSIGN because future has completed 2024-12-11T20:50:07,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:50:07,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, ASSIGN because future has completed 2024-12-11T20:50:07,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:50:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T20:50:07,779 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:07,779 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 1946b6c529d923df30924eb1b6d5a6d3, NAME => 'testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:50:07,780 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. service=AccessControlService 2024-12-11T20:50:07,780 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:50:07,780 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,780 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:07,780 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,780 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,784 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,784 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 7a75fd46402d52e1a28d20ff753c678b, NAME => 'testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:50:07,784 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. service=AccessControlService 2024-12-11T20:50:07,785 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:50:07,785 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,785 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:07,785 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,785 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,795 INFO [StoreOpener-7a75fd46402d52e1a28d20ff753c678b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,800 INFO [StoreOpener-7a75fd46402d52e1a28d20ff753c678b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a75fd46402d52e1a28d20ff753c678b columnFamilyName cf 2024-12-11T20:50:07,800 DEBUG [StoreOpener-7a75fd46402d52e1a28d20ff753c678b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:07,802 INFO [StoreOpener-7a75fd46402d52e1a28d20ff753c678b-1 {}] regionserver.HStore(327): Store=7a75fd46402d52e1a28d20ff753c678b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:50:07,802 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,803 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,804 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,806 INFO [StoreOpener-1946b6c529d923df30924eb1b6d5a6d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,811 INFO [StoreOpener-1946b6c529d923df30924eb1b6d5a6d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1946b6c529d923df30924eb1b6d5a6d3 columnFamilyName cf 2024-12-11T20:50:07,812 DEBUG [StoreOpener-1946b6c529d923df30924eb1b6d5a6d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:07,812 INFO [StoreOpener-1946b6c529d923df30924eb1b6d5a6d3-1 {}] regionserver.HStore(327): Store=1946b6c529d923df30924eb1b6d5a6d3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:50:07,813 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,814 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,814 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,815 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,815 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 1946b6c529d923df30924eb1b6d5a6d3 
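For reference: the CompactionConfiguration lines above print the effective store compaction settings for the new regions (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0). Assuming the standard property names from the HBase reference configuration (worth verifying against the exact version in use), these can be tuned as sketched below; this is a hedged illustration, not taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the defaults printed in the CompactionConfiguration log lines above.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // compaction ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
  }
}
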
2024-12-11T20:50:07,817 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,819 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,847 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:50:07,848 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 1946b6c529d923df30924eb1b6d5a6d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70808816, jitterRate=0.05513358116149902}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:50:07,848 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:07,849 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 1946b6c529d923df30924eb1b6d5a6d3: Running coprocessor pre-open hook at 1733950207781Writing region info on filesystem at 1733950207781Initializing all the Stores at 1733950207782 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950207782Cleaning up temporary data from old regions at 1733950207815 (+33 ms)Running coprocessor post-open hooks at 1733950207848 (+33 ms)Region opened successfully at 1733950207849 (+1 ms) 2024-12-11T20:50:07,850 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3., pid=100, masterSystemTime=1733950207774 2024-12-11T20:50:07,855 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:07,855 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
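For reference: at this point the first of the two regions (1946b6c529d923df30924eb1b6d5a6d3) has been opened on 5be53b084ac7,40355 and the second (7a75fd46402d52e1a28d20ff753c678b) follows just below on 5be53b084ac7,40781, with the assignment procedures recording those locations in hbase:meta. A minimal sketch of how a client can read the resulting locations back through RegionLocator (illustrative only, assuming a configured connection):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // One line per region: encoded name and the region server hosting it,
      // i.e. the same information the assignment procedures wrote to hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
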
2024-12-11T20:50:07,856 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=1946b6c529d923df30924eb1b6d5a6d3, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:50:07,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:50:07,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=98 2024-12-11T20:50:07,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820 in 247 msec 2024-12-11T20:50:07,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, ASSIGN in 426 msec 2024-12-11T20:50:07,891 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:50:07,892 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 7a75fd46402d52e1a28d20ff753c678b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64985246, jitterRate=-0.03164437413215637}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:50:07,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:07,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 7a75fd46402d52e1a28d20ff753c678b: Running coprocessor pre-open hook at 1733950207785Writing region info on filesystem at 1733950207785Initializing all the Stores at 1733950207787 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950207787Cleaning up temporary data from old regions at 1733950207809 (+22 ms)Running coprocessor post-open hooks at 1733950207892 (+83 ms)Region opened successfully at 1733950207892 2024-12-11T20:50:07,893 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b., pid=99, masterSystemTime=1733950207771 2024-12-11T20:50:07,897 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post 
open deploy task for testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,897 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:07,897 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=7a75fd46402d52e1a28d20ff753c678b, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:07,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:50:07,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=97 2024-12-11T20:50:07,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244 in 293 msec 2024-12-11T20:50:07,917 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:50:07,917 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950207917"}]},"ts":"1733950207917"} 2024-12-11T20:50:07,921 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T20:50:07,923 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:50:07,924 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-11T20:50:07,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-11T20:50:07,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, ASSIGN in 467 msec 2024-12-11T20:50:07,928 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:50:08,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:08,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:08,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:08,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:08,025 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:08,027 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:08,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:08,027 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:08,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:08,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 1.0230 sec 2024-12-11T20:50:08,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T20:50:08,155 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T20:50:08,155 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-11T20:50:08,156 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-11T20:50:08,162 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
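For reference: the PermissionStorage and ZKPermissionWatcher entries above show the table creator being granted RWXCA (read, write, exec, create, admin) on the new table and each region server refreshing its permission cache from ZooKeeper. Assuming the AccessControlClient helper from the HBase security package, an equivalent client-side grant looks roughly like the sketch below (hedged, not the test's code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant the jenkins user full rights (RWXCA) on the whole table,
      // all column families and qualifiers (null family/qualifier).
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
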
2024-12-11T20:50:08,163 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:50:08,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:50:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950208168 (current time:1733950208168). 2024-12-11T20:50:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:50:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T20:50:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:50:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d1e629d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:08,206 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:08,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:08,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:08,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd60da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:08,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:08,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,210 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:43772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:08,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77a66ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:08,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:08,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:08,215 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57708, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:08,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@236f4b25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,225 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:08,229 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:08,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:08,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:08,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a151d2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:08,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:08,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,233 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:08,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@691d1ecf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:08,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:08,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:08,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:08,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:50:08,248 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:08,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:08,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:50:08,251 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:50:08,252 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:50:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:50:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:50:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T20:50:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T20:50:08,263 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:50:08,267 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:50:08,272 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:50:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T20:50:08,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742021_1197 (size=170) 2024-12-11T20:50:08,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742021_1197 (size=170) 2024-12-11T20:50:08,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742021_1197 (size=170) 2024-12-11T20:50:08,469 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:50:08,469 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3}] 2024-12-11T20:50:08,470 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:08,471 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T20:50:08,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-11T20:50:08,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 7a75fd46402d52e1a28d20ff753c678b: 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:08,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:50:08,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 1946b6c529d923df30924eb1b6d5a6d3: 2024-12-11T20:50:08,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T20:50:08,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:08,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:08,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:50:08,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742022_1198 (size=71) 2024-12-11T20:50:08,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742022_1198 (size=71) 2024-12-11T20:50:08,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742022_1198 (size=71) 2024-12-11T20:50:08,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
2024-12-11T20:50:08,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-11T20:50:08,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-11T20:50:08,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:08,844 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:08,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 in 377 msec 2024-12-11T20:50:08,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T20:50:08,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742023_1199 (size=71) 2024-12-11T20:50:08,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742023_1199 (size=71) 2024-12-11T20:50:08,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742023_1199 (size=71) 2024-12-11T20:50:08,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 
2024-12-11T20:50:08,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-11T20:50:08,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-11T20:50:08,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:08,905 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:08,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-11T20:50:08,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b in 438 msec 2024-12-11T20:50:08,910 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:50:08,914 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:50:08,916 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:50:08,916 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:08,917 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:09,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742024_1200 (size=552) 2024-12-11T20:50:09,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742024_1200 (size=552) 2024-12-11T20:50:09,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742024_1200 (size=552) 2024-12-11T20:50:09,104 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:50:09,156 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:50:09,157 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:09,164 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:50:09,164 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T20:50:09,167 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 911 msec 2024-12-11T20:50:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T20:50:09,394 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T20:50:09,399 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0e4a45053be7e5009141e14cc5f30ef95', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:50:09,401 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='18f2c246696f675d766fa1ed5437ca169', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:50:09,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='25fede1e93acf1ec8561808b6a1157988', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:50:09,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-11T20:50:09,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:50:09,433 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:50:09,445 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-11T20:50:09,446 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:09,446 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:09,449 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:50:09,457 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:50:09,470 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:50:09,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:50:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950209478 (current time:1733950209478). 
2024-12-11T20:50:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:50:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T20:50:09,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:50:09,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e479e88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:09,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:09,489 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:09,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:09,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:09,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df1955e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:09,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:09,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,492 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:09,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d4e5035, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:09,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:09,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:09,499 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57726, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:09,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:09,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:09,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,503 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:50:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17442ba7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:09,508 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:09,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:09,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:09,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19ec9288, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:09,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:09,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,511 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:09,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69572c8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:09,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:09,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:09,517 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57728, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:50:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:50:09,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:09,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49414, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:09,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:09,524 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:09,524 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:50:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:50:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:50:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:50:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T20:50:09,529 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:50:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T20:50:09,533 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:50:09,538 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:50:09,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742025_1201 (size=165) 2024-12-11T20:50:09,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742025_1201 (size=165) 2024-12-11T20:50:09,643 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:50:09,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
7a75fd46402d52e1a28d20ff753c678b}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3}] 2024-12-11T20:50:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T20:50:09,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742025_1201 (size=165) 2024-12-11T20:50:09,644 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:09,647 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:09,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-11T20:50:09,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-11T20:50:09,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:09,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
2024-12-11T20:50:09,808 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 7a75fd46402d52e1a28d20ff753c678b 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T20:50:09,808 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 1946b6c529d923df30924eb1b6d5a6d3 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T20:50:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T20:50:09,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/.tmp/cf/756ad6f880574f7f9e5f9e56a19c769d is 71, key is 082953eb4ab60eccc1a1cb19e7a2a644/cf:q/1733950209424/Put/seqid=0 2024-12-11T20:50:09,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/.tmp/cf/52f59c02766e4cdfa7a026e2e104985f is 71, key is 15c8d93d25cc69a3b6c0e9adefa17ed4/cf:q/1733950209426/Put/seqid=0 2024-12-11T20:50:09,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742026_1202 (size=5216) 2024-12-11T20:50:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742026_1202 (size=5216) 2024-12-11T20:50:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742026_1202 (size=5216) 2024-12-11T20:50:09,983 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/.tmp/cf/756ad6f880574f7f9e5f9e56a19c769d 2024-12-11T20:50:09,993 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/.tmp/cf/756ad6f880574f7f9e5f9e56a19c769d as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d 2024-12-11T20:50:10,002 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d, entries=2, sequenceid=6, filesize=5.1 K 
2024-12-11T20:50:10,003 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 7a75fd46402d52e1a28d20ff753c678b in 195ms, sequenceid=6, compaction requested=false 2024-12-11T20:50:10,003 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-11T20:50:10,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 7a75fd46402d52e1a28d20ff753c678b: 2024-12-11T20:50:10,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. for snaptb0-testExportFileSystemState completed. 2024-12-11T20:50:10,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T20:50:10,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:10,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d] hfiles 2024-12-11T20:50:10,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d for snapshot=snaptb0-testExportFileSystemState 2024-12-11T20:50:10,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742027_1203 (size=8392) 2024-12-11T20:50:10,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742027_1203 (size=8392) 2024-12-11T20:50:10,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742027_1203 (size=8392) 2024-12-11T20:50:10,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/.tmp/cf/52f59c02766e4cdfa7a026e2e104985f 2024-12-11T20:50:10,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/.tmp/cf/52f59c02766e4cdfa7a026e2e104985f as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f 2024-12-11T20:50:10,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f, entries=48, sequenceid=6, filesize=8.2 K 2024-12-11T20:50:10,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 1946b6c529d923df30924eb1b6d5a6d3 in 250ms, sequenceid=6, compaction requested=false 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 1946b6c529d923df30924eb1b6d5a6d3: 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. for snaptb0-testExportFileSystemState completed. 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f] hfiles 2024-12-11T20:50:10,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f for snapshot=snaptb0-testExportFileSystemState 2024-12-11T20:50:10,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0003_000001 (auth:SIMPLE) from 127.0.0.1:50642 2024-12-11T20:50:10,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000001/launch_container.sh] 2024-12-11T20:50:10,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000001/container_tokens] 2024-12-11T20:50:10,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0003/container_1733950008312_0003_01_000001/sysfs] 2024-12-11T20:50:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T20:50:10,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742028_1204 (size=110) 2024-12-11T20:50:10,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742028_1204 (size=110) 2024-12-11T20:50:10,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742028_1204 (size=110) 2024-12-11T20:50:10,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 
2024-12-11T20:50:10,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-11T20:50:10,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-11T20:50:10,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:10,189 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:10,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7a75fd46402d52e1a28d20ff753c678b in 547 msec 2024-12-11T20:50:10,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742029_1205 (size=110) 2024-12-11T20:50:10,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742029_1205 (size=110) 2024-12-11T20:50:10,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742029_1205 (size=110) 2024-12-11T20:50:10,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
2024-12-11T20:50:10,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-11T20:50:10,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-11T20:50:10,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:10,277 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:10,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-11T20:50:10,285 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:50:10,285 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3 in 636 msec 2024-12-11T20:50:10,286 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:50:10,290 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:50:10,290 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-11T20:50:10,295 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T20:50:10,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742030_1206 (size=630) 2024-12-11T20:50:10,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742030_1206 (size=630) 2024-12-11T20:50:10,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742030_1206 (size=630) 2024-12-11T20:50:10,397 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:50:10,427 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:50:10,428 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T20:50:10,431 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:50:10,431 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T20:50:10,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 906 msec 2024-12-11T20:50:10,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T20:50:10,673 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T20:50:10,674 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674 2024-12-11T20:50:10,674 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:10,736 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:10,736 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T20:50:10,741 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-12-11T20:50:10,765 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T20:50:10,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742031_1207 (size=165) 2024-12-11T20:50:10,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742031_1207 (size=165) 2024-12-11T20:50:10,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742031_1207 (size=165) 2024-12-11T20:50:10,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742032_1208 (size=630) 2024-12-11T20:50:10,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742032_1208 (size=630) 2024-12-11T20:50:10,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742032_1208 (size=630) 2024-12-11T20:50:10,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:10,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:10,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:11,393 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:50:12,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-1959130713452745034.jar 2024-12-11T20:50:12,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-6311555600008769992.jar 2024-12-11T20:50:12,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,628 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:12,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:50:12,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:50:12,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:50:12,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:50:12,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:50:12,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:50:12,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:50:12,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:50:12,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:50:12,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:50:12,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:50:12,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:12,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:12,639 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:50:12,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:12,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:12,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:50:12,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:50:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742033_1209 (size=131440) 2024-12-11T20:50:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742033_1209 (size=131440) 2024-12-11T20:50:12,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742033_1209 (size=131440) 2024-12-11T20:50:12,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742034_1210 (size=4188619) 2024-12-11T20:50:12,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742034_1210 (size=4188619) 2024-12-11T20:50:12,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742034_1210 (size=4188619) 2024-12-11T20:50:13,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742035_1211 (size=1323991) 2024-12-11T20:50:13,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742035_1211 (size=1323991) 2024-12-11T20:50:13,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742035_1211 (size=1323991) 2024-12-11T20:50:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742036_1212 (size=903544) 2024-12-11T20:50:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742036_1212 (size=903544) 2024-12-11T20:50:13,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742036_1212 (size=903544) 2024-12-11T20:50:13,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742037_1213 (size=8360360) 2024-12-11T20:50:13,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742037_1213 (size=8360360) 2024-12-11T20:50:13,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742037_1213 (size=8360360) 2024-12-11T20:50:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742038_1214 (size=1877034) 2024-12-11T20:50:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742038_1214 (size=1877034) 
2024-12-11T20:50:13,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742038_1214 (size=1877034) 2024-12-11T20:50:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742039_1215 (size=77835) 2024-12-11T20:50:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742039_1215 (size=77835) 2024-12-11T20:50:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742039_1215 (size=77835) 2024-12-11T20:50:13,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742040_1216 (size=30949) 2024-12-11T20:50:13,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742040_1216 (size=30949) 2024-12-11T20:50:13,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742040_1216 (size=30949) 2024-12-11T20:50:13,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742041_1217 (size=1597213) 2024-12-11T20:50:13,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742041_1217 (size=1597213) 2024-12-11T20:50:13,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742041_1217 (size=1597213) 2024-12-11T20:50:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742042_1218 (size=443172) 2024-12-11T20:50:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742042_1218 (size=443172) 2024-12-11T20:50:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742042_1218 (size=443172) 2024-12-11T20:50:13,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742043_1219 (size=4695811) 2024-12-11T20:50:13,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742043_1219 (size=4695811) 2024-12-11T20:50:13,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742043_1219 (size=4695811) 2024-12-11T20:50:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742044_1220 (size=232957) 2024-12-11T20:50:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742044_1220 (size=232957) 2024-12-11T20:50:13,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742044_1220 (size=232957) 2024-12-11T20:50:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742045_1221 
(size=127628) 2024-12-11T20:50:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742045_1221 (size=127628) 2024-12-11T20:50:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742045_1221 (size=127628) 2024-12-11T20:50:13,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742046_1222 (size=20406) 2024-12-11T20:50:13,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742046_1222 (size=20406) 2024-12-11T20:50:13,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742046_1222 (size=20406) 2024-12-11T20:50:13,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742047_1223 (size=5175431) 2024-12-11T20:50:13,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742047_1223 (size=5175431) 2024-12-11T20:50:13,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742047_1223 (size=5175431) 2024-12-11T20:50:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742048_1224 (size=217634) 2024-12-11T20:50:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742048_1224 (size=217634) 2024-12-11T20:50:13,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742048_1224 (size=217634) 2024-12-11T20:50:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742049_1225 (size=1832290) 2024-12-11T20:50:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742049_1225 (size=1832290) 2024-12-11T20:50:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742049_1225 (size=1832290) 2024-12-11T20:50:13,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742050_1226 (size=322274) 2024-12-11T20:50:13,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742050_1226 (size=322274) 2024-12-11T20:50:13,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742050_1226 (size=322274) 2024-12-11T20:50:13,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742051_1227 (size=503880) 2024-12-11T20:50:13,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742051_1227 (size=503880) 2024-12-11T20:50:13,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to 
blk_1073742051_1227 (size=503880) 2024-12-11T20:50:13,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742052_1228 (size=29229) 2024-12-11T20:50:13,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742052_1228 (size=29229) 2024-12-11T20:50:13,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742052_1228 (size=29229) 2024-12-11T20:50:13,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742053_1229 (size=24096) 2024-12-11T20:50:13,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742053_1229 (size=24096) 2024-12-11T20:50:13,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742053_1229 (size=24096) 2024-12-11T20:50:13,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742054_1230 (size=6426228) 2024-12-11T20:50:13,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742054_1230 (size=6426228) 2024-12-11T20:50:13,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742054_1230 (size=6426228) 2024-12-11T20:50:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742055_1231 (size=111872) 2024-12-11T20:50:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742055_1231 (size=111872) 2024-12-11T20:50:13,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742055_1231 (size=111872) 2024-12-11T20:50:13,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742056_1232 (size=45609) 2024-12-11T20:50:13,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742056_1232 (size=45609) 2024-12-11T20:50:13,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742056_1232 (size=45609) 2024-12-11T20:50:13,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742057_1233 (size=136454) 2024-12-11T20:50:13,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742057_1233 (size=136454) 2024-12-11T20:50:13,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742057_1233 (size=136454) 2024-12-11T20:50:13,724 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
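Editor's note: the entries above show the export staging its dependency jars in HDFS, and the entries below show ExportSnapshot loading the snapshot's hfile list and computing the copy splits before the MapReduce job runs. As a minimal sketch (not the exact call TestExportSnapshot makes), an export like the one logged here can be driven through the ExportSnapshot tool with ToolRunner; the snapshot name and -copy-to path are copied from the log above, while the class name and the rest of the wiring are illustrative assumptions.

    // Minimal sketch: launching a snapshot export similar to the one logged here.
    // Snapshot name and target path come from the log; everything else is illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to the target filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to",
            "hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674"
        });
        System.exit(rc);
      }
    }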
2024-12-11T20:50:13,725 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-11T20:50:13,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.2 K 2024-12-11T20:50:13,727 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.1 K 2024-12-11T20:50:13,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742058_1234 (size=447) 2024-12-11T20:50:13,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742058_1234 (size=447) 2024-12-11T20:50:13,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742058_1234 (size=447) 2024-12-11T20:50:13,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742059_1235 (size=21) 2024-12-11T20:50:13,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742059_1235 (size=21) 2024-12-11T20:50:13,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742059_1235 (size=21) 2024-12-11T20:50:13,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742060_1236 (size=304006) 2024-12-11T20:50:13,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742060_1236 (size=304006) 2024-12-11T20:50:13,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742060_1236 (size=304006) 2024-12-11T20:50:13,767 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:50:13,767 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:50:13,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T20:50:13,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T20:50:13,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T20:50:13,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-11T20:50:14,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:54738 2024-12-11T20:50:19,476 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:50:20,764 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:48206 2024-12-11T20:50:21,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742061_1237 (size=349704) 2024-12-11T20:50:21,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742061_1237 (size=349704) 2024-12-11T20:50:21,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742061_1237 (size=349704) 2024-12-11T20:50:23,141 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:39070 2024-12-11T20:50:23,151 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:40986 2024-12-11T20:50:29,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742062_1238 (size=8392) 2024-12-11T20:50:29,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742062_1238 (size=8392) 2024-12-11T20:50:29,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742062_1238 (size=8392) 2024-12-11T20:50:29,401 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000002/launch_container.sh] 2024-12-11T20:50:29,401 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000002/container_tokens] 2024-12-11T20:50:29,401 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000002/sysfs] 2024-12-11T20:50:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742064_1240 (size=5216) 2024-12-11T20:50:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742064_1240 (size=5216) 2024-12-11T20:50:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742064_1240 (size=5216) 2024-12-11T20:50:31,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742063_1239 (size=22168) 2024-12-11T20:50:31,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742063_1239 (size=22168) 2024-12-11T20:50:31,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742063_1239 (size=22168) 2024-12-11T20:50:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742065_1241 (size=466) 2024-12-11T20:50:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742065_1241 (size=466) 2024-12-11T20:50:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742065_1241 (size=466) 2024-12-11T20:50:31,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742066_1242 (size=22168) 2024-12-11T20:50:31,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742066_1242 (size=22168) 2024-12-11T20:50:31,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742066_1242 (size=22168) 2024-12-11T20:50:31,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000003/launch_container.sh] 2024-12-11T20:50:31,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000003/container_tokens] 2024-12-11T20:50:31,175 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000003/sysfs] 2024-12-11T20:50:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742067_1243 (size=349704) 2024-12-11T20:50:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742067_1243 (size=349704) 2024-12-11T20:50:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742067_1243 (size=349704) 2024-12-11T20:50:31,197 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:53746 2024-12-11T20:50:31,783 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:50:33,198 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:50:33,199 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
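Editor's note: at this point the export has finished and, as the entries that follow show, the test lists both the source and the exported snapshot directories and checks for the .snapshotinfo and data.manifest files. A minimal sketch of that kind of listing with the Hadoop FileSystem API is shown below; the filesystem URI and snapshot path are copied from the log, while the class name and structure are illustrative assumptions.

    // Minimal sketch: listing the files of the exported snapshot, as the
    // verification entries below do. Paths come from the log; the rest is illustrative.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36717"), new Configuration());
        Path snapshotDir = new Path("/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/"
            + "export-test/export-1733950210674/.hbase-snapshot/snaptb0-testExportFileSystemState");
        // Expect at least .snapshotinfo and data.manifest under the exported snapshot directory.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(snapshotDir, true);
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }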
2024-12-11T20:50:33,225 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-11T20:50:33,225 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:50:33,226 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:50:33,226 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T20:50:33,227 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T20:50:33,227 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T20:50:33,227 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T20:50:33,228 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T20:50:33,228 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950210674/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T20:50:33,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-11T20:50:33,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T20:50:33,249 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950233249"}]},"ts":"1733950233249"} 2024-12-11T20:50:33,251 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T20:50:33,251 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-11T20:50:33,253 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-11T20:50:33,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, UNASSIGN}] 2024-12-11T20:50:33,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, UNASSIGN 2024-12-11T20:50:33,257 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, UNASSIGN 2024-12-11T20:50:33,260 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=7a75fd46402d52e1a28d20ff753c678b, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:33,261 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=1946b6c529d923df30924eb1b6d5a6d3, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:50:33,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, UNASSIGN because future has completed 2024-12-11T20:50:33,265 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:33,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:50:33,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, UNASSIGN because future has completed 2024-12-11T20:50:33,267 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:50:33,267 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:50:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T20:50:33,419 INFO 
[RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:33,419 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:33,419 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 7a75fd46402d52e1a28d20ff753c678b, disabling compactions & flushes 2024-12-11T20:50:33,419 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:33,419 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:33,419 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. after waiting 0 ms 2024-12-11T20:50:33,419 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 2024-12-11T20:50:33,426 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:50:33,427 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:33,427 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b. 
2024-12-11T20:50:33,427 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 7a75fd46402d52e1a28d20ff753c678b: Waiting for close lock at 1733950233419Running coprocessor pre-close hooks at 1733950233419Disabling compacts and flushes for region at 1733950233419Disabling writes for close at 1733950233419Writing region close event to WAL at 1733950233420 (+1 ms)Running coprocessor post-close hooks at 1733950233427 (+7 ms)Closed at 1733950233427 2024-12-11T20:50:33,430 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:33,431 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=7a75fd46402d52e1a28d20ff753c678b, regionState=CLOSED 2024-12-11T20:50:33,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:50:33,435 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:33,436 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:50:33,436 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 1946b6c529d923df30924eb1b6d5a6d3, disabling compactions & flushes 2024-12-11T20:50:33,436 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:33,436 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:33,436 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. after waiting 0 ms 2024-12-11T20:50:33,436 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 
2024-12-11T20:50:33,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-11T20:50:33,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 7a75fd46402d52e1a28d20ff753c678b, server=5be53b084ac7,40781,1733949995244 in 172 msec 2024-12-11T20:50:33,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7a75fd46402d52e1a28d20ff753c678b, UNASSIGN in 192 msec 2024-12-11T20:50:33,514 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:50:33,515 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:50:33,516 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3. 2024-12-11T20:50:33,516 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 1946b6c529d923df30924eb1b6d5a6d3: Waiting for close lock at 1733950233436Running coprocessor pre-close hooks at 1733950233436Disabling compacts and flushes for region at 1733950233436Disabling writes for close at 1733950233436Writing region close event to WAL at 1733950233455 (+19 ms)Running coprocessor post-close hooks at 1733950233515 (+60 ms)Closed at 1733950233516 (+1 ms) 2024-12-11T20:50:33,519 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:33,520 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=1946b6c529d923df30924eb1b6d5a6d3, regionState=CLOSED 2024-12-11T20:50:33,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:50:33,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-11T20:50:33,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 1946b6c529d923df30924eb1b6d5a6d3, server=5be53b084ac7,40355,1733949994820 in 258 msec 2024-12-11T20:50:33,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-11T20:50:33,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=1946b6c529d923df30924eb1b6d5a6d3, UNASSIGN in 273 msec 2024-12-11T20:50:33,537 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-11T20:50:33,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 281 msec 2024-12-11T20:50:33,540 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950233540"}]},"ts":"1733950233540"} 2024-12-11T20:50:33,548 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T20:50:33,549 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-11T20:50:33,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 310 msec 2024-12-11T20:50:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T20:50:33,563 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T20:50:33,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-11T20:50:33,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,574 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-11T20:50:33,579 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-11T20:50:33,584 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,591 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:33,591 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:33,596 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf, FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/recovered.edits] 2024-12-11T20:50:33,598 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/recovered.edits] 2024-12-11T20:50:33,605 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/cf/756ad6f880574f7f9e5f9e56a19c769d 2024-12-11T20:50:33,625 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/cf/52f59c02766e4cdfa7a026e2e104985f 2024-12-11T20:50:33,626 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b/recovered.edits/9.seqid 2024-12-11T20:50:33,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T20:50:33,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T20:50:33,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,636 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/7a75fd46402d52e1a28d20ff753c678b 2024-12-11T20:50:33,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T20:50:33,643 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3/recovered.edits/9.seqid 2024-12-11T20:50:33,645 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemState/1946b6c529d923df30924eb1b6d5a6d3 2024-12-11T20:50:33,645 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:33,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:33,649 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-12-11T20:50:33,649 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): 
Skipping permission cache refresh because writable data is empty 2024-12-11T20:50:33,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-11T20:50:33,650 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,654 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-11T20:50:33,657 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-11T20:50:33,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-11T20:50:33,659 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950233659"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:33,659 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950233659"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:33,665 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:50:33,665 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7a75fd46402d52e1a28d20ff753c678b, NAME => 'testtb-testExportFileSystemState,,1733950207003.7a75fd46402d52e1a28d20ff753c678b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1946b6c529d923df30924eb1b6d5a6d3, NAME => 'testtb-testExportFileSystemState,1,1733950207003.1946b6c529d923df30924eb1b6d5a6d3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:50:33,665 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 
2024-12-11T20:50:33,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950233665"}]},"ts":"9223372036854775807"} 2024-12-11T20:50:33,669 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-11T20:50:33,670 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T20:50:33,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 106 msec 2024-12-11T20:50:33,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-11T20:50:33,753 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-11T20:50:33,753 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T20:50:33,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T20:50:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-11T20:50:33,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T20:50:33,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-11T20:50:33,822 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=799 (was 797) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:33611 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33611 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3596 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39661 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 117887) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:32850 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-80118386_1 at /127.0.0.1:57188 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:57218 [Waiting for 
operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:59684 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=796 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1006 (was 953) - SystemLoadAverage LEAK? -, ProcessCount=29 (was 29), AvailableMemoryMB=1300 (was 1377) 2024-12-11T20:50:33,822 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-11T20:50:33,874 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=799, OpenFileDescriptor=796, MaxFileDescriptor=1048576, SystemLoadAverage=1006, ProcessCount=29, AvailableMemoryMB=1284 2024-12-11T20:50:33,874 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-11T20:50:33,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:50:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:50:33,881 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:50:33,881 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:33,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-11T20:50:33,884 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:50:33,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T20:50:33,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742068_1244 (size=404) 2024-12-11T20:50:33,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742068_1244 (size=404) 2024-12-11T20:50:33,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742068_1244 (size=404) 2024-12-11T20:50:33,968 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating 
{ENCODED => 91274771532f8854159bd2395a185159, NAME => 'testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:33,972 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2db39fc82b20d27944df82162e98777e, NAME => 'testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:33,975 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T20:50:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T20:50:34,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742070_1246 (size=65) 2024-12-11T20:50:34,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742070_1246 (size=65) 2024-12-11T20:50:34,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742070_1246 (size=65) 2024-12-11T20:50:34,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742069_1245 (size=65) 2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 91274771532f8854159bd2395a185159, disabling compactions & flushes 2024-12-11T20:50:34,077 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 
2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. after waiting 0 ms 2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:34,077 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:34,077 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 91274771532f8854159bd2395a185159: Waiting for close lock at 1733950234077Disabling compacts and flushes for region at 1733950234077Disabling writes for close at 1733950234077Writing region close event to WAL at 1733950234077Closed at 1733950234077 2024-12-11T20:50:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742069_1245 (size=65) 2024-12-11T20:50:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742069_1245 (size=65) 2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 2db39fc82b20d27944df82162e98777e, disabling compactions & flushes 2024-12-11T20:50:34,080 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. after waiting 0 ms 2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:34,080 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
2024-12-11T20:50:34,080 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2db39fc82b20d27944df82162e98777e: Waiting for close lock at 1733950234080Disabling compacts and flushes for region at 1733950234080Disabling writes for close at 1733950234080Writing region close event to WAL at 1733950234080Closed at 1733950234080 2024-12-11T20:50:34,083 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:50:34,083 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950234083"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950234083"}]},"ts":"1733950234083"} 2024-12-11T20:50:34,083 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950234083"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950234083"}]},"ts":"1733950234083"} 2024-12-11T20:50:34,087 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:50:34,089 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:50:34,089 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950234089"}]},"ts":"1733950234089"} 2024-12-11T20:50:34,100 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-11T20:50:34,100 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:50:34,106 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:50:34,106 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:50:34,106 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:50:34,106 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:50:34,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, ASSIGN}] 2024-12-11T20:50:34,111 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, ASSIGN 2024-12-11T20:50:34,113 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:50:34,114 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, ASSIGN 2024-12-11T20:50:34,116 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:50:34,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T20:50:34,264 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:50:34,264 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=91274771532f8854159bd2395a185159, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:50:34,264 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=2db39fc82b20d27944df82162e98777e, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:34,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, ASSIGN because future has completed 2024-12-11T20:50:34,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:50:34,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, ASSIGN because future has completed 2024-12-11T20:50:34,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:50:34,443 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:34,443 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 2db39fc82b20d27944df82162e98777e, NAME => 'testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:50:34,443 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. service=AccessControlService 2024-12-11T20:50:34,443 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:50:34,444 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,444 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:34,444 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,444 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,486 INFO [StoreOpener-2db39fc82b20d27944df82162e98777e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,488 INFO [StoreOpener-2db39fc82b20d27944df82162e98777e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2db39fc82b20d27944df82162e98777e columnFamilyName cf 2024-12-11T20:50:34,488 DEBUG [StoreOpener-2db39fc82b20d27944df82162e98777e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:34,489 INFO [StoreOpener-2db39fc82b20d27944df82162e98777e-1 {}] regionserver.HStore(327): Store=2db39fc82b20d27944df82162e98777e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:50:34,489 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,490 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,491 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 
2024-12-11T20:50:34,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 91274771532f8854159bd2395a185159, NAME => 'testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:50:34,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. service=AccessControlService 2024-12-11T20:50:34,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,491 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,491 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:50:34,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:50:34,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,492 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,493 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,508 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:50:34,509 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 2db39fc82b20d27944df82162e98777e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62101967, jitterRate=-0.07460857927799225}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:50:34,509 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:34,509 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 2db39fc82b20d27944df82162e98777e: Running coprocessor pre-open hook at 1733950234444Writing region info on filesystem at 1733950234444Initializing all the Stores at 1733950234449 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950234450 (+1 ms)Cleaning up temporary data from old regions at 1733950234491 (+41 ms)Running coprocessor post-open hooks at 1733950234509 (+18 ms)Region opened successfully at 1733950234509 2024-12-11T20:50:34,510 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e., pid=118, masterSystemTime=1733950234430 2024-12-11T20:50:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T20:50:34,513 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:34,513 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
2024-12-11T20:50:34,514 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=2db39fc82b20d27944df82162e98777e, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:50:34,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:50:34,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-11T20:50:34,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244 in 248 msec 2024-12-11T20:50:34,523 INFO [StoreOpener-91274771532f8854159bd2395a185159-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,525 INFO [StoreOpener-91274771532f8854159bd2395a185159-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91274771532f8854159bd2395a185159 columnFamilyName cf 2024-12-11T20:50:34,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, ASSIGN in 416 msec 2024-12-11T20:50:34,526 DEBUG [StoreOpener-91274771532f8854159bd2395a185159-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:50:34,526 INFO [StoreOpener-91274771532f8854159bd2395a185159-1 {}] regionserver.HStore(327): Store=91274771532f8854159bd2395a185159/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:50:34,527 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,528 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159 2024-12-11T20:50:34,528 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159 2024-12-11T20:50:34,533 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,533 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,536 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,551 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-11T20:50:34,556 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:50:34,557 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 91274771532f8854159bd2395a185159; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60119667, jitterRate=-0.10414715111255646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:50:34,557 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 91274771532f8854159bd2395a185159 2024-12-11T20:50:34,557 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 91274771532f8854159bd2395a185159: Running coprocessor pre-open hook at 1733950234492Writing region info on filesystem at 1733950234492Initializing all the Stores at 1733950234502 (+10 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950234502Cleaning up temporary data from old regions at 1733950234533 (+31 ms)Running coprocessor post-open hooks at 1733950234557 (+24 ms)Region opened successfully at 1733950234557 2024-12-11T20:50:34,559 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159., pid=117, masterSystemTime=1733950234427 2024-12-11T20:50:34,564 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 
2024-12-11T20:50:34,564 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:34,564 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=91274771532f8854159bd2395a185159, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:50:34,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:50:34,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-11T20:50:34,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112 in 301 msec 2024-12-11T20:50:34,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-11T20:50:34,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, ASSIGN in 466 msec 2024-12-11T20:50:34,578 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:50:34,580 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950234578"}]},"ts":"1733950234578"} 2024-12-11T20:50:34,584 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-11T20:50:34,586 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:50:34,586 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-11T20:50:34,594 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T20:50:34,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:34,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:34,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:34,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:50:34,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:34,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:34,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:34,675 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T20:50:34,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 796 msec 2024-12-11T20:50:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T20:50:35,024 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T20:50:35,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-11T20:50:35,025 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:35,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-11T20:50:35,033 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:35,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-12-11T20:50:35,034 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T20:50:35,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T20:50:35,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950235041 (current time:1733950235041). 
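The hbase:acl write ("jenkins: RWXCA") and the ZKPermissionWatcher cache refreshes above are the server-side effect of the table owner receiving full table-scope rights. In this log the entry is written by the CreateTableProcedure itself; an equivalent grant could also be issued explicitly from a client, roughly as in the hedged sketch below. The Connection handle is assumed (see the earlier sketch), and passing null family/qualifier for a table-scope grant is an illustrative choice, not something read from this log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

// Grant the 'jenkins' user Read/Write/eXec/Create/Admin on the whole table,
// which is what the "jenkins: RWXCA" acl entry above encodes. Null family and
// qualifier scope the grant to the table rather than a single column.
AccessControlClient.grant(conn,   // 'conn' assumed from the earlier connection sketch
    TableName.valueOf("testtb-testConsecutiveExports"),
    "jenkins",
    null, null,
    Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
    Permission.Action.CREATE, Permission.Action.ADMIN);
```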
2024-12-11T20:50:35,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:50:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T20:50:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:50:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2f7b8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:35,049 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:35,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:35,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:35,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7644f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:35,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:35,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,053 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:35,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51827a6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:35,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:35,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:35,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:35,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:35,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:35,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,062 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:50:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@239fc446, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:35,084 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4415cf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:35,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,087 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:35,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@155d2812, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:35,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:35,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:35,092 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:50:35,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:50:35,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:35,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:35,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:35,100 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:35,100 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:50:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T20:50:35,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:50:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T20:50:35,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T20:50:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:35,104 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:50:35,106 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:50:35,109 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:50:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:35,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742071_1247 (size=161) 2024-12-11T20:50:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742071_1247 (size=161) 2024-12-11T20:50:35,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742071_1247 (size=161) 2024-12-11T20:50:35,245 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:50:35,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e}] 2024-12-11T20:50:35,249 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:35,249 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 2024-12-11T20:50:35,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-11T20:50:35,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 91274771532f8854159bd2395a185159: 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:35,405 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
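The SnapshotProcedure (pid=119) and its two per-region SnapshotRegionProcedure children above are the master-side execution of the FLUSH-type snapshot request logged earlier. From the client side that whole flow is a single Admin call; a hedged sketch follows, with the 'admin' handle assumed as in the earlier sketches.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.SnapshotType;

// A FLUSH-type snapshot: each online region is flushed and its hfiles are
// referenced in the snapshot manifest, which is the per-region work visible
// in the SnapshotRegionProcedure entries above. For this first, empty snapshot
// there are no hfiles yet, hence the "Adding snapshot references for [] hfiles".
admin.snapshot("emptySnaptb0-testConsecutiveExports",
    TableName.valueOf("testtb-testConsecutiveExports"),
    SnapshotType.FLUSH);
```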
2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 2db39fc82b20d27944df82162e98777e: 2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:35,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:50:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:35,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742072_1248 (size=68) 2024-12-11T20:50:35,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742072_1248 (size=68) 2024-12-11T20:50:35,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742072_1248 (size=68) 2024-12-11T20:50:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742073_1249 (size=68) 2024-12-11T20:50:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742073_1249 (size=68) 2024-12-11T20:50:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742073_1249 (size=68) 2024-12-11T20:50:35,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:35,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 
2024-12-11T20:50:35,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-11T20:50:35,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-11T20:50:35,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 91274771532f8854159bd2395a185159 2024-12-11T20:50:35,973 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 2024-12-11T20:50:35,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 in 729 msec 2024-12-11T20:50:36,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:36,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-11T20:50:36,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-11T20:50:36,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:36,041 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:36,047 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-11T20:50:36,047 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e in 797 msec 2024-12-11T20:50:36,047 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:50:36,052 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:50:36,053 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:50:36,053 DEBUG 
[PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-11T20:50:36,054 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-11T20:50:36,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:36,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742074_1250 (size=543) 2024-12-11T20:50:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742074_1250 (size=543) 2024-12-11T20:50:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742074_1250 (size=543) 2024-12-11T20:50:36,718 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:50:36,744 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:50:36,745 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-11T20:50:36,763 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:50:36,763 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T20:50:36,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.6620 sec 2024-12-11T20:50:37,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T20:50:37,258 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T20:50:37,263 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='0082c63baf0662add525b4b71acd71128', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:50:37,266 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1022abf5c7062009f654dc84f1f025afa', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:50:37,272 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='23dc9db8da797055cedb962c1c7775a4d', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:50:37,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:50:37,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:50:37,322 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T20:50:37,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-11T20:50:37,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:37,327 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:50:37,333 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T20:50:37,341 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T20:50:37,365 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T20:50:37,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T20:50:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950237370 (current time:1733950237370). 
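The two "writing data to region ... with WAL disabled" warnings above correspond to client puts issued with write-ahead-log durability turned off. A hedged sketch of one such write follows; the value is a placeholder, the Table handle is assumed, and the row key is only an example copied from a flush entry later in this log.

```java
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// A put that skips the write-ahead log. This is faster but, as the server
// warns above, the edit is lost if the region server crashes before a flush.
Put put = new Put(Bytes.toBytes("08ab9ec149ba2b11914bdfe057d4c3ca"))   // example row from the later flush log
    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
put.setDurability(Durability.SKIP_WAL);
table.put(put);   // 'table' assumed: conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))
```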
2024-12-11T20:50:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:50:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T20:50:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:50:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b540dc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:37,375 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:37,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:37,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:37,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4895ae14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:37,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:37,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,377 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35076, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:37,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43501491, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:37,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:37,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:37,383 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:37,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,386 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:50:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e2df31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:50:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:50:37,390 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:50:37,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:50:37,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:50:37,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@581a2169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:50:37,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:50:37,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,394 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:50:37,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ca96871, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:50:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:50:37,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:50:37,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:37,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
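The repeated "Checking to see if procedure is done pid=..." lines are the master answering the client's completion polls for the snapshot procedure. With the asynchronous Admin API the same flow looks roughly like the sketch below; this is an assumption about an equivalent client call, not the code the test actually runs, and the Admin handle is again assumed.

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

// Submit the snapshot, then block until the master reports the procedure done;
// waiting on the future is what drives the completion polling seen above.
Future<Void> pending = admin.snapshotAsync(new SnapshotDescription(
    "snaptb0-testConsecutiveExports",
    TableName.valueOf("testtb-testConsecutiveExports"),
    SnapshotType.FLUSH));
pending.get();   // returns once the master finishes SNAPSHOT_POST_OPERATION
```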
2024-12-11T20:50:37,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:50:37,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:50:37,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55846, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:50:37,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:50:37,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:50:37,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:50:37,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T20:50:37,410 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:50:37,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:50:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T20:50:37,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T20:50:37,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T20:50:37,415 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:50:37,423 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:50:37,427 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:50:37,463 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0004_000001 (auth:SIMPLE) from 127.0.0.1:35778 2024-12-11T20:50:37,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742075_1251 (size=156) 2024-12-11T20:50:37,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742075_1251 (size=156) 2024-12-11T20:50:37,506 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:50:37,506 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e}] 2024-12-11T20:50:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742075_1251 (size=156) 2024-12-11T20:50:37,511 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:37,512 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 2024-12-11T20:50:37,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T20:50:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000001/launch_container.sh] 2024-12-11T20:50:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000001/container_tokens] 2024-12-11T20:50:37,533 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0004/container_1733950008312_0004_01_000001/sysfs] 2024-12-11T20:50:37,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-11T20:50:37,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 
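Because snaptb0 is a FLUSH-type snapshot and the table now holds data, each SnapshotRegionCallable first flushes the region's memstore (the "Flushing ... 1/1 column families" entries that follow) before recording hfile references. The same flush can be triggered by hand from a client, which can be useful when inspecting snapshot contents; a minimal hedged sketch, with 'admin' assumed as before:

```java
import org.apache.hadoop.hbase.TableName;

// Force every region of the table to flush its memstore to hfiles, mirroring
// what the snapshot's per-region callables do before capturing references.
admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
```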
2024-12-11T20:50:37,679 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 91274771532f8854159bd2395a185159 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T20:50:37,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-11T20:50:37,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:50:37,681 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 2db39fc82b20d27944df82162e98777e 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T20:50:37,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/.tmp/cf/54b540caf0244a7ab475d2fb8b833b5c is 71, key is 08ab9ec149ba2b11914bdfe057d4c3ca/cf:q/1733950237288/Put/seqid=0 2024-12-11T20:50:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T20:50:37,756 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/.tmp/cf/42a27194a3504ec5920075f257175897 is 71, key is 165fbd08a1c2576f5203ec2e0ecaa5c3/cf:q/1733950237309/Put/seqid=0 2024-12-11T20:50:37,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742076_1252 (size=5424) 2024-12-11T20:50:37,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742076_1252 (size=5424) 2024-12-11T20:50:37,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742076_1252 (size=5424) 2024-12-11T20:50:37,777 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/.tmp/cf/54b540caf0244a7ab475d2fb8b833b5c 2024-12-11T20:50:37,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/.tmp/cf/54b540caf0244a7ab475d2fb8b833b5c as 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c 2024-12-11T20:50:37,804 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c, entries=5, sequenceid=6, filesize=5.3 K 2024-12-11T20:50:37,805 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 91274771532f8854159bd2395a185159 in 126ms, sequenceid=6, compaction requested=false 2024-12-11T20:50:37,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 91274771532f8854159bd2395a185159: 2024-12-11T20:50:37,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. for snaptb0-testConsecutiveExports completed. 2024-12-11T20:50:37,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T20:50:37,805 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:37,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c] hfiles 2024-12-11T20:50:37,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c for snapshot=snaptb0-testConsecutiveExports 2024-12-11T20:50:37,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742077_1253 (size=8190) 2024-12-11T20:50:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742077_1253 (size=8190) 2024-12-11T20:50:37,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742077_1253 (size=8190) 2024-12-11T20:50:37,967 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/.tmp/cf/42a27194a3504ec5920075f257175897 2024-12-11T20:50:38,003 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/.tmp/cf/42a27194a3504ec5920075f257175897 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897 2024-12-11T20:50:38,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897, entries=45, sequenceid=6, filesize=8.0 K 2024-12-11T20:50:38,012 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 2db39fc82b20d27944df82162e98777e in 331ms, sequenceid=6, compaction requested=false 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 2db39fc82b20d27944df82162e98777e: 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. for snaptb0-testConsecutiveExports completed. 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897] hfiles 2024-12-11T20:50:38,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897 for snapshot=snaptb0-testConsecutiveExports 2024-12-11T20:50:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T20:50:38,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742078_1254 (size=107) 2024-12-11T20:50:38,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:50:38,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-11T20:50:38,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742078_1254 (size=107) 2024-12-11T20:50:38,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742078_1254 (size=107) 2024-12-11T20:50:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-11T20:50:38,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 91274771532f8854159bd2395a185159 2024-12-11T20:50:38,104 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 2024-12-11T20:50:38,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 91274771532f8854159bd2395a185159 in 600 msec 2024-12-11T20:50:38,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742079_1255 (size=107) 2024-12-11T20:50:38,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
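While the region subprocedures run, the master keeps answering "Checking to see if procedure is done pid=122" calls from the client. A minimal sketch of confirming from the client side that the snapshot exists once the procedure completes, assuming a synchronous Admin handle (the test itself drives this through the async admin, as the RawAsyncHBaseAdmin entry further below shows):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class SnapshotExistsSketch {
      // Returns true once the snapshot manifest has been moved out of .hbase-snapshot/.tmp.
      static boolean snapshotExists(Admin admin, String name) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          if (name.equals(sd.getName())) {
            return true;
          }
        }
        return false;
      }
    }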
2024-12-11T20:50:38,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-11T20:50:38,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742079_1255 (size=107) 2024-12-11T20:50:38,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742079_1255 (size=107) 2024-12-11T20:50:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-11T20:50:38,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:38,205 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e 2024-12-11T20:50:38,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-11T20:50:38,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2db39fc82b20d27944df82162e98777e in 703 msec 2024-12-11T20:50:38,224 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:50:38,228 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:50:38,229 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:50:38,230 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-11T20:50:38,238 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T20:50:38,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742080_1256 (size=621) 2024-12-11T20:50:38,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742080_1256 (size=621) 2024-12-11T20:50:38,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742080_1256 (size=621) 2024-12-11T20:50:38,351 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:50:38,406 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:50:38,407 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T20:50:38,413 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:50:38,413 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T20:50:38,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.0020 sec 2024-12-11T20:50:38,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T20:50:38,553 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T20:50:38,554 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553 2024-12-11T20:50:38,554 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:38,670 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:38,670 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@25b57f86, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T20:50:38,685 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:50:38,723 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T20:50:38,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:38,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:38,938 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:39,017 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:50:40,808 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-16913418836933566304.jar 2024-12-11T20:50:40,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-5729237435314333925.jar 2024-12-11T20:50:40,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:40,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:50:40,885 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:50:40,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:50:40,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:50:40,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:50:40,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:50:40,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:50:40,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:50:40,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:50:40,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:50:40,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:50:40,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:40,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:40,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:50:40,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:40,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:50:40,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:50:40,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
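The long run of TableMapReduceUtil(972) entries above records, for each class the export job depends on, the jar that will be shipped with the MapReduce job (temporary hadoop-*.jar files are built under the test-data directory for classes not already packaged in a jar). A minimal sketch of how a job triggers this resolution, assuming an otherwise unconfigured Job; the job name is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Resolves the containing jar for each HBase/ZooKeeper/metrics class the job
        // needs and adds it to the job's distributed cache so map tasks can load them.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }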
2024-12-11T20:50:40,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742081_1257 (size=131440) 2024-12-11T20:50:40,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742081_1257 (size=131440) 2024-12-11T20:50:40,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742081_1257 (size=131440) 2024-12-11T20:50:41,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742082_1258 (size=4188619) 2024-12-11T20:50:41,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742082_1258 (size=4188619) 2024-12-11T20:50:41,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742082_1258 (size=4188619) 2024-12-11T20:50:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742083_1259 (size=1323991) 2024-12-11T20:50:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742083_1259 (size=1323991) 2024-12-11T20:50:41,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742083_1259 (size=1323991) 2024-12-11T20:50:41,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742084_1260 (size=903544) 2024-12-11T20:50:41,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742084_1260 (size=903544) 2024-12-11T20:50:41,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742084_1260 (size=903544) 2024-12-11T20:50:41,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742085_1261 (size=8360360) 2024-12-11T20:50:41,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742085_1261 (size=8360360) 2024-12-11T20:50:41,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742085_1261 (size=8360360) 2024-12-11T20:50:41,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742086_1262 (size=443172) 2024-12-11T20:50:41,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742086_1262 (size=443172) 2024-12-11T20:50:41,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742086_1262 (size=443172) 2024-12-11T20:50:41,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742087_1263 (size=1877034) 2024-12-11T20:50:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to 
blk_1073742087_1263 (size=1877034) 2024-12-11T20:50:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742087_1263 (size=1877034) 2024-12-11T20:50:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742088_1264 (size=77835) 2024-12-11T20:50:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742088_1264 (size=77835) 2024-12-11T20:50:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742088_1264 (size=77835) 2024-12-11T20:50:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742089_1265 (size=30949) 2024-12-11T20:50:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742089_1265 (size=30949) 2024-12-11T20:50:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742089_1265 (size=30949) 2024-12-11T20:50:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742090_1266 (size=1597213) 2024-12-11T20:50:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742090_1266 (size=1597213) 2024-12-11T20:50:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742090_1266 (size=1597213) 2024-12-11T20:50:41,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742091_1267 (size=6426228) 2024-12-11T20:50:41,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742091_1267 (size=6426228) 2024-12-11T20:50:41,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742091_1267 (size=6426228) 2024-12-11T20:50:41,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742092_1268 (size=4695811) 2024-12-11T20:50:41,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742092_1268 (size=4695811) 2024-12-11T20:50:41,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742092_1268 (size=4695811) 2024-12-11T20:50:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742093_1269 (size=232957) 2024-12-11T20:50:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742093_1269 (size=232957) 2024-12-11T20:50:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742093_1269 (size=232957) 2024-12-11T20:50:41,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 
is added to blk_1073742094_1270 (size=127628) 2024-12-11T20:50:41,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742094_1270 (size=127628) 2024-12-11T20:50:41,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742094_1270 (size=127628) 2024-12-11T20:50:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742095_1271 (size=20406) 2024-12-11T20:50:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742095_1271 (size=20406) 2024-12-11T20:50:41,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742095_1271 (size=20406) 2024-12-11T20:50:41,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742096_1272 (size=5175431) 2024-12-11T20:50:41,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742096_1272 (size=5175431) 2024-12-11T20:50:41,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742096_1272 (size=5175431) 2024-12-11T20:50:41,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742097_1273 (size=217634) 2024-12-11T20:50:41,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742097_1273 (size=217634) 2024-12-11T20:50:41,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742097_1273 (size=217634) 2024-12-11T20:50:41,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742098_1274 (size=1832290) 2024-12-11T20:50:41,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742098_1274 (size=1832290) 2024-12-11T20:50:41,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742098_1274 (size=1832290) 2024-12-11T20:50:41,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742099_1275 (size=322274) 2024-12-11T20:50:41,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742099_1275 (size=322274) 2024-12-11T20:50:41,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742099_1275 (size=322274) 2024-12-11T20:50:41,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742100_1276 (size=503880) 2024-12-11T20:50:41,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742100_1276 (size=503880) 2024-12-11T20:50:41,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46705 is added to blk_1073742100_1276 (size=503880) 2024-12-11T20:50:41,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742101_1277 (size=29229) 2024-12-11T20:50:41,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742101_1277 (size=29229) 2024-12-11T20:50:41,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742101_1277 (size=29229) 2024-12-11T20:50:41,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742102_1278 (size=24096) 2024-12-11T20:50:41,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742102_1278 (size=24096) 2024-12-11T20:50:41,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742102_1278 (size=24096) 2024-12-11T20:50:41,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742103_1279 (size=111872) 2024-12-11T20:50:41,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742103_1279 (size=111872) 2024-12-11T20:50:41,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742103_1279 (size=111872) 2024-12-11T20:50:41,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742104_1280 (size=45609) 2024-12-11T20:50:41,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742104_1280 (size=45609) 2024-12-11T20:50:41,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742104_1280 (size=45609) 2024-12-11T20:50:41,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742105_1281 (size=136454) 2024-12-11T20:50:41,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742105_1281 (size=136454) 2024-12-11T20:50:41,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742105_1281 (size=136454) 2024-12-11T20:50:42,101 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
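Immediately below, ExportSnapshot loads the snapshot's hfile list and groups the files into map splits (export split=0 and split=1). A minimal sketch of an equivalent programmatic invocation, assuming the tool is driven through ToolRunner with the snapshot name and the local target path printed earlier; the test harness itself wires this up differently:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            // Local destination, as logged by TestExportSnapshot(523) above.
            "-copy-to",
            "file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553"
        });
        System.exit(rc);
      }
    }

With skipTmp=false (as logged), the copy is staged under .hbase-snapshot/.tmp in the target before being moved to its final snapshot name.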
2024-12-11T20:50:42,103 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T20:50:42,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-11T20:50:42,105 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-11T20:50:42,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742106_1282 (size=441) 2024-12-11T20:50:42,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742106_1282 (size=441) 2024-12-11T20:50:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742106_1282 (size=441) 2024-12-11T20:50:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742107_1283 (size=21) 2024-12-11T20:50:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742107_1283 (size=21) 2024-12-11T20:50:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742107_1283 (size=21) 2024-12-11T20:50:42,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742108_1284 (size=304047) 2024-12-11T20:50:42,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742108_1284 (size=304047) 2024-12-11T20:50:42,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742108_1284 (size=304047) 2024-12-11T20:50:42,169 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:50:42,169 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:50:42,463 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0005_000001 (auth:SIMPLE) from 127.0.0.1:44730 2024-12-11T20:50:43,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T20:50:43,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-11T20:50:44,528 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 91274771532f8854159bd2395a185159 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:50:44,528 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2db39fc82b20d27944df82162e98777e changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:50:48,502 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0005_000001 (auth:SIMPLE) from 127.0.0.1:60102 2024-12-11T20:50:48,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742109_1285 (size=349745) 2024-12-11T20:50:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742109_1285 (size=349745) 2024-12-11T20:50:48,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742109_1285 (size=349745) 2024-12-11T20:50:50,768 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0005_000001 (auth:SIMPLE) from 127.0.0.1:44746 2024-12-11T20:50:50,768 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0005_000001 (auth:SIMPLE) from 127.0.0.1:45824 2024-12-11T20:50:55,767 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000003/launch_container.sh] 2024-12-11T20:50:55,767 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000003/container_tokens] 2024-12-11T20:50:55,767 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000003/sysfs] 2024-12-11T20:50:57,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742110_1286 (size=22231) 2024-12-11T20:50:57,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742110_1286 (size=22231) 2024-12-11T20:50:57,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742110_1286 (size=22231) 2024-12-11T20:50:57,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742111_1287 (size=463) 2024-12-11T20:50:57,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742111_1287 (size=463) 2024-12-11T20:50:57,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742111_1287 (size=463) 2024-12-11T20:50:57,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742112_1288 (size=22231) 2024-12-11T20:50:57,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742112_1288 (size=22231) 2024-12-11T20:50:57,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742112_1288 (size=22231) 2024-12-11T20:50:57,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742113_1289 (size=349745) 2024-12-11T20:50:57,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742113_1289 (size=349745) 2024-12-11T20:50:57,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742113_1289 (size=349745) 2024-12-11T20:50:59,410 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:50:59,410 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
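After the export completes and is verified, the test lists the source and target snapshot directories; the TestExportSnapshot(500) entries below show the two files that make up the snapshot layout, .snapshotinfo and data.manifest. A minimal sketch of that kind of listing with the Hadoop FileSystem API, assuming the exported local path printed in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path exportedSnapshotDir = new Path(
            "file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports");
        FileSystem fs = exportedSnapshotDir.getFileSystem(conf);
        for (FileStatus st : fs.listStatus(exportedSnapshotDir)) {
          // Expect .snapshotinfo and data.manifest in the exported snapshot directory.
          System.out.println(st.getPath() + " (" + st.getLen() + " bytes)");
        }
      }
    }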
2024-12-11T20:50:59,428 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T20:50:59,428 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:50:59,428 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:50:59,429 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T20:50:59,443 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T20:50:59,443 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T20:50:59,443 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@25b57f86 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T20:50:59,443 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T20:50:59,444 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T20:50:59,452 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:59,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:50:59,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@25b57f86, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T20:50:59,508 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:50:59,521 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T20:50:59,593 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:59,594 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:50:59,594 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-11045424696235197264.jar 2024-12-11T20:51:00,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-2270103424364956586.jar 2024-12-11T20:51:00,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,900 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,901 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,901 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,901 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:00,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:51:00,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:51:00,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:51:00,902 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:51:00,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:51:00,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:51:00,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:51:00,903 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:51:00,903 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:51:00,904 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:51:00,904 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:51:00,904 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:00,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:00,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:00,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:00,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:00,906 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:00,906 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:00,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742114_1290 (size=131440) 2024-12-11T20:51:00,989 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742114_1290 (size=131440) 2024-12-11T20:51:00,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742114_1290 (size=131440) 2024-12-11T20:51:01,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742115_1291 (size=4188619) 2024-12-11T20:51:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742115_1291 (size=4188619) 2024-12-11T20:51:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742115_1291 (size=4188619) 2024-12-11T20:51:01,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742116_1292 (size=1323991) 2024-12-11T20:51:01,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742116_1292 (size=1323991) 2024-12-11T20:51:01,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742116_1292 (size=1323991) 2024-12-11T20:51:01,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742117_1293 (size=903544) 2024-12-11T20:51:01,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742117_1293 (size=903544) 2024-12-11T20:51:01,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742117_1293 (size=903544) 2024-12-11T20:51:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742118_1294 (size=8360360) 2024-12-11T20:51:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742118_1294 (size=8360360) 2024-12-11T20:51:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742118_1294 (size=8360360) 2024-12-11T20:51:01,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742119_1295 (size=443172) 2024-12-11T20:51:01,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742119_1295 (size=443172) 2024-12-11T20:51:01,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742119_1295 (size=443172) 2024-12-11T20:51:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742120_1296 (size=1877034) 2024-12-11T20:51:01,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742120_1296 (size=1877034) 2024-12-11T20:51:01,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742120_1296 (size=1877034) 2024-12-11T20:51:01,526 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742121_1297 (size=77835) 2024-12-11T20:51:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742121_1297 (size=77835) 2024-12-11T20:51:01,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742121_1297 (size=77835) 2024-12-11T20:51:01,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742122_1298 (size=30949) 2024-12-11T20:51:01,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742122_1298 (size=30949) 2024-12-11T20:51:01,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742122_1298 (size=30949) 2024-12-11T20:51:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742123_1299 (size=1597213) 2024-12-11T20:51:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742123_1299 (size=1597213) 2024-12-11T20:51:01,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742123_1299 (size=1597213) 2024-12-11T20:51:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742124_1300 (size=6426228) 2024-12-11T20:51:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742124_1300 (size=6426228) 2024-12-11T20:51:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742124_1300 (size=6426228) 2024-12-11T20:51:01,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742125_1301 (size=4695811) 2024-12-11T20:51:01,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742125_1301 (size=4695811) 2024-12-11T20:51:01,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742125_1301 (size=4695811) 2024-12-11T20:51:01,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742126_1302 (size=232957) 2024-12-11T20:51:01,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742126_1302 (size=232957) 2024-12-11T20:51:01,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742126_1302 (size=232957) 2024-12-11T20:51:01,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742127_1303 (size=127628) 2024-12-11T20:51:01,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742127_1303 (size=127628) 2024-12-11T20:51:01,646 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742127_1303 (size=127628) 2024-12-11T20:51:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742128_1304 (size=20406) 2024-12-11T20:51:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742128_1304 (size=20406) 2024-12-11T20:51:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742128_1304 (size=20406) 2024-12-11T20:51:01,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742129_1305 (size=5175431) 2024-12-11T20:51:01,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742129_1305 (size=5175431) 2024-12-11T20:51:01,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742129_1305 (size=5175431) 2024-12-11T20:51:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742130_1306 (size=217634) 2024-12-11T20:51:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742130_1306 (size=217634) 2024-12-11T20:51:01,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742130_1306 (size=217634) 2024-12-11T20:51:01,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
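The "For class X, using jar Y" DEBUG messages earlier in this stretch come from TableMapReduceUtil resolving which jar supplies each class so those jars can be shipped with the MapReduce job that performs the export. As a rough illustration only (not part of this log and not the test's actual harness code; the job name below is a made-up placeholder), a job typically requests this via addDependencyJars:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch"); // placeholder job name
        // Locates the jar containing each class the job needs (the source of the
        // "For class ..., using jar ..." DEBUG lines above) and adds it to the
        // job's tmpjars so it is distributed to the MapReduce tasks.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
      }
    }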
2024-12-11T20:51:02,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742131_1307 (size=1832290) 2024-12-11T20:51:02,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742131_1307 (size=1832290) 2024-12-11T20:51:02,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742131_1307 (size=1832290) 2024-12-11T20:51:02,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742132_1308 (size=322274) 2024-12-11T20:51:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742132_1308 (size=322274) 2024-12-11T20:51:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742132_1308 (size=322274) 2024-12-11T20:51:02,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742133_1309 (size=503880) 2024-12-11T20:51:02,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742133_1309 (size=503880) 2024-12-11T20:51:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742133_1309 (size=503880) 2024-12-11T20:51:02,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742134_1310 (size=29229) 2024-12-11T20:51:02,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742134_1310 (size=29229) 2024-12-11T20:51:02,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742134_1310 (size=29229) 2024-12-11T20:51:02,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742135_1311 (size=24096) 2024-12-11T20:51:02,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742135_1311 (size=24096) 2024-12-11T20:51:02,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742135_1311 (size=24096) 2024-12-11T20:51:02,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742136_1312 (size=111872) 2024-12-11T20:51:02,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742136_1312 (size=111872) 2024-12-11T20:51:02,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742136_1312 (size=111872) 2024-12-11T20:51:02,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742137_1313 (size=45609) 2024-12-11T20:51:02,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742137_1313 (size=45609) 
2024-12-11T20:51:02,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742137_1313 (size=45609) 2024-12-11T20:51:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742138_1314 (size=136454) 2024-12-11T20:51:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742138_1314 (size=136454) 2024-12-11T20:51:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742138_1314 (size=136454) 2024-12-11T20:51:02,184 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:51:02,187 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T20:51:02,188 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-11T20:51:02,189 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-11T20:51:02,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742139_1315 (size=441) 2024-12-11T20:51:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742139_1315 (size=441) 2024-12-11T20:51:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742139_1315 (size=441) 2024-12-11T20:51:02,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742140_1316 (size=21) 2024-12-11T20:51:02,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742140_1316 (size=21) 2024-12-11T20:51:02,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742140_1316 (size=21) 2024-12-11T20:51:02,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742141_1317 (size=304047) 2024-12-11T20:51:02,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742141_1317 (size=304047) 2024-12-11T20:51:02,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742141_1317 (size=304047) 2024-12-11T20:51:02,354 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000002/launch_container.sh] 2024-12-11T20:51:02,356 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000002/container_tokens] 2024-12-11T20:51:02,356 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000002/sysfs] 2024-12-11T20:51:03,920 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:51:03,920 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:51:03,926 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0005_000001 (auth:SIMPLE) from 127.0.0.1:34916 2024-12-11T20:51:03,945 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000001/launch_container.sh] 2024-12-11T20:51:03,945 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000001/container_tokens] 2024-12-11T20:51:03,945 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0005/container_1733950008312_0005_01_000001/sysfs] 2024-12-11T20:51:04,284 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:33028 2024-12-11T20:51:15,784 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:53390 2024-12-11T20:51:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742142_1318 (size=349745) 2024-12-11T20:51:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742142_1318 (size=349745) 2024-12-11T20:51:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45301 is added to blk_1073742142_1318 (size=349745) 2024-12-11T20:51:18,019 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:48242 2024-12-11T20:51:18,019 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:56276 2024-12-11T20:51:19,444 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2db39fc82b20d27944df82162e98777e, had cached 0 bytes from a total of 8190 2024-12-11T20:51:19,492 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 91274771532f8854159bd2395a185159, had cached 0 bytes from a total of 5424 2024-12-11T20:51:22,191 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000003/launch_container.sh] 2024-12-11T20:51:22,192 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000003/container_tokens] 2024-12-11T20:51:22,192 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000003/sysfs] 2024-12-11T20:51:24,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742143_1319 (size=21198) 2024-12-11T20:51:24,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742143_1319 (size=21198) 2024-12-11T20:51:24,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742143_1319 (size=21198) 2024-12-11T20:51:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742144_1320 (size=463) 2024-12-11T20:51:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742144_1320 (size=463) 2024-12-11T20:51:24,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742144_1320 (size=463) 2024-12-11T20:51:24,248 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000002/launch_container.sh] 2024-12-11T20:51:24,248 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000002/container_tokens] 2024-12-11T20:51:24,249 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000002/sysfs] 2024-12-11T20:51:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742145_1321 (size=21198) 2024-12-11T20:51:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742145_1321 (size=21198) 2024-12-11T20:51:24,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742145_1321 (size=21198) 2024-12-11T20:51:24,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742146_1322 (size=349745) 2024-12-11T20:51:24,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742146_1322 (size=349745) 2024-12-11T20:51:24,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742146_1322 (size=349745) 2024-12-11T20:51:24,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:42996 2024-12-11T20:51:25,932 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:51:25,932 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
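For context, the export whose finalization and verification are logged here is driven by the standard ExportSnapshot tool. A minimal sketch of how such an export is typically invoked is below; this is not the test's actual code, and the local destination URI is a made-up placeholder (the real target is the local-export-... directory shown earlier in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the command line:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/snapshot-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/snapshot-export"   // placeholder destination
        });
        System.exit(rc);
      }
    }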
2024-12-11T20:51:25,945 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T20:51:25,945 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:51:25,945 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:51:25,945 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T20:51:25,946 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T20:51:25,947 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T20:51:25,947 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@25b57f86 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T20:51:25,947 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T20:51:25,947 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950238553/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T20:51:25,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-11T20:51:25,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:25,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T20:51:25,971 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950285971"}]},"ts":"1733950285971"} 2024-12-11T20:51:25,974 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-11T20:51:25,974 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-11T20:51:25,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-11T20:51:25,979 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, UNASSIGN}] 2024-12-11T20:51:25,981 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, UNASSIGN 2024-12-11T20:51:25,982 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, UNASSIGN 2024-12-11T20:51:25,983 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=91274771532f8854159bd2395a185159, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:25,984 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=2db39fc82b20d27944df82162e98777e, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:25,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, UNASSIGN because future has completed 2024-12-11T20:51:25,989 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:25,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:25,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, UNASSIGN because future has completed 2024-12-11T20:51:25,992 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:25,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:26,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T20:51:26,143 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 91274771532f8854159bd2395a185159 2024-12-11T20:51:26,144 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:51:26,144 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 91274771532f8854159bd2395a185159, disabling compactions & flushes 2024-12-11T20:51:26,144 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:51:26,144 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:51:26,144 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. after waiting 0 ms 2024-12-11T20:51:26,144 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:51:26,150 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 2db39fc82b20d27944df82162e98777e 2024-12-11T20:51:26,150 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:51:26,150 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 2db39fc82b20d27944df82162e98777e, disabling compactions & flushes 2024-12-11T20:51:26,150 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:51:26,150 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 2024-12-11T20:51:26,150 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. after waiting 0 ms 2024-12-11T20:51:26,150 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
2024-12-11T20:51:26,172 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:51:26,173 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:26,174 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159. 2024-12-11T20:51:26,174 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 91274771532f8854159bd2395a185159: Waiting for close lock at 1733950286144Running coprocessor pre-close hooks at 1733950286144Disabling compacts and flushes for region at 1733950286144Disabling writes for close at 1733950286144Writing region close event to WAL at 1733950286147 (+3 ms)Running coprocessor post-close hooks at 1733950286173 (+26 ms)Closed at 1733950286174 (+1 ms) 2024-12-11T20:51:26,177 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 91274771532f8854159bd2395a185159 2024-12-11T20:51:26,178 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=91274771532f8854159bd2395a185159, regionState=CLOSED 2024-12-11T20:51:26,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:26,183 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:51:26,184 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:26,184 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e. 
2024-12-11T20:51:26,184 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 2db39fc82b20d27944df82162e98777e: Waiting for close lock at 1733950286150Running coprocessor pre-close hooks at 1733950286150Disabling compacts and flushes for region at 1733950286150Disabling writes for close at 1733950286150Writing region close event to WAL at 1733950286154 (+4 ms)Running coprocessor post-close hooks at 1733950286184 (+30 ms)Closed at 1733950286184 2024-12-11T20:51:26,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-11T20:51:26,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 91274771532f8854159bd2395a185159, server=5be53b084ac7,39865,1733949995112 in 193 msec 2024-12-11T20:51:26,186 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 2db39fc82b20d27944df82162e98777e 2024-12-11T20:51:26,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91274771532f8854159bd2395a185159, UNASSIGN in 206 msec 2024-12-11T20:51:26,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=2db39fc82b20d27944df82162e98777e, regionState=CLOSED 2024-12-11T20:51:26,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:26,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-11T20:51:26,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 2db39fc82b20d27944df82162e98777e, server=5be53b084ac7,40781,1733949995244 in 197 msec 2024-12-11T20:51:26,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-11T20:51:26,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=2db39fc82b20d27944df82162e98777e, UNASSIGN in 211 msec 2024-12-11T20:51:26,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-11T20:51:26,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 218 msec 2024-12-11T20:51:26,196 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950286195"}]},"ts":"1733950286195"} 2024-12-11T20:51:26,197 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-11T20:51:26,198 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 
2024-12-11T20:51:26,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 236 msec 2024-12-11T20:51:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T20:51:26,283 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T20:51:26,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-11T20:51:26,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,285 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-11T20:51:26,286 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,288 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-11T20:51:26,289 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159 2024-12-11T20:51:26,289 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e 2024-12-11T20:51:26,291 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/recovered.edits] 2024-12-11T20:51:26,291 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/recovered.edits] 2024-12-11T20:51:26,294 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/cf/42a27194a3504ec5920075f257175897 2024-12-11T20:51:26,312 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e/recovered.edits/9.seqid 2024-12-11T20:51:26,326 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/2db39fc82b20d27944df82162e98777e 2024-12-11T20:51:26,326 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/cf/54b540caf0244a7ab475d2fb8b833b5c 2024-12-11T20:51:26,332 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159/recovered.edits/9.seqid 2024-12-11T20:51:26,333 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testConsecutiveExports/91274771532f8854159bd2395a185159 2024-12-11T20:51:26,333 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-11T20:51:26,336 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,343 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-11T20:51:26,352 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-11T20:51:26,357 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,357 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-11T20:51:26,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,357 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950286357"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:26,357 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950286357"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:26,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T20:51:26,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T20:51:26,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,363 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T20:51:26,364 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:51:26,364 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 91274771532f8854159bd2395a185159, NAME => 'testtb-testConsecutiveExports,,1733950233876.91274771532f8854159bd2395a185159.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2db39fc82b20d27944df82162e98777e, NAME => 'testtb-testConsecutiveExports,1,1733950233876.2db39fc82b20d27944df82162e98777e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:51:26,365 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
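The disable and delete of testtb-testConsecutiveExports and the snapshot deletions recorded around this point correspond to ordinary Admin API calls made by the test's cleanup. A hedged sketch, assuming cluster connection settings come from the classpath configuration (not the test's actual code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testConsecutiveExports");
          admin.disableTable(table);   // DisableTableProcedure, as in pid=125 above
          admin.deleteTable(table);    // DeleteTableProcedure, as in pid=131 above
          // The two snapshot deletions requested at the end of this stretch of the log.
          admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
          admin.deleteSnapshot("snaptb0-testConsecutiveExports");
        }
      }
    }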
2024-12-11T20:51:26,365 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950286365"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:26,370 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-11T20:51:26,371 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T20:51:26,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 87 msec 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:26,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-11T20:51:26,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:26,379 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-11T20:51:26,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-11T20:51:26,380 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-11T20:51:26,380 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T20:51:26,387 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T20:51:26,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-11T20:51:26,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T20:51:26,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-11T20:51:26,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=806 (was 799) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1549634170_1 at /127.0.0.1:47438 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1549634170_1 at /127.0.0.1:38386 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35249 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:47470 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4921 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:34027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:46371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:38410 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:35249 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 124681) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=793 (was 796), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1211 (was 1006) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 29), AvailableMemoryMB=1067 (was 1284) 2024-12-11T20:51:26,418 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-11T20:51:26,437 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=806, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=1211, ProcessCount=17, AvailableMemoryMB=1066 2024-12-11T20:51:26,437 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-11T20:51:26,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:51:26,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:26,441 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:51:26,442 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:26,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-11T20:51:26,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T20:51:26,443 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:51:26,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742147_1323 (size=422) 2024-12-11T20:51:26,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742147_1323 (size=422) 2024-12-11T20:51:26,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742147_1323 (size=422) 2024-12-11T20:51:26,454 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 334f3e62817613989e9752453a3409fa, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:26,454 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 543d1a3f52e9986ac920e4d97e5bcf0e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:26,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742148_1324 (size=83) 2024-12-11T20:51:26,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742149_1325 (size=83) 2024-12-11T20:51:26,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742149_1325 (size=83) 2024-12-11T20:51:26,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742149_1325 (size=83) 2024-12-11T20:51:26,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742148_1324 (size=83) 2024-12-11T20:51:26,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742148_1324 (size=83) 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] 
regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 334f3e62817613989e9752453a3409fa, disabling compactions & flushes 2024-12-11T20:51:26,474 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. after waiting 0 ms 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,474 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 334f3e62817613989e9752453a3409fa: Waiting for close lock at 1733950286474Disabling compacts and flushes for region at 1733950286474Disabling writes for close at 1733950286474Writing region close event to WAL at 1733950286474Closed at 1733950286474 2024-12-11T20:51:26,474 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:26,475 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 543d1a3f52e9986ac920e4d97e5bcf0e, disabling compactions & flushes 2024-12-11T20:51:26,475 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:26,475 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:26,475 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
after waiting 0 ms 2024-12-11T20:51:26,475 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:26,475 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:26,475 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 543d1a3f52e9986ac920e4d97e5bcf0e: Waiting for close lock at 1733950286475Disabling compacts and flushes for region at 1733950286475Disabling writes for close at 1733950286475Writing region close event to WAL at 1733950286475Closed at 1733950286475 2024-12-11T20:51:26,476 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:51:26,476 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733950286476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950286476"}]},"ts":"1733950286476"} 2024-12-11T20:51:26,476 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733950286476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950286476"}]},"ts":"1733950286476"} 2024-12-11T20:51:26,479 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
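The CreateTableProcedure entries above record the requested schema for testtb-testExportFileSystemStateWithMergeRegion (one 'cf' family, VERSIONS => '1', no compression, 64 KB blocks, REGION_REPLICATION => '1') and the two regions split at '1' being added to meta. As a minimal sketch, assuming a standard client connection, an equivalent table could be created with the Java admin API like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                     // VERSIONS => '1'
              .setBlocksize(64 * 1024)                               // BLOCKSIZE => 64 KB
              .build());
      // A single split key of '1' yields the two regions seen in the log:
      // ('' .. '1') and ('1' .. '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}
```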
2024-12-11T20:51:26,480 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:51:26,480 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950286480"}]},"ts":"1733950286480"} 2024-12-11T20:51:26,484 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-11T20:51:26,484 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:51:26,485 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:51:26,485 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:51:26,485 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:51:26,485 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:51:26,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, ASSIGN}] 2024-12-11T20:51:26,487 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, ASSIGN 2024-12-11T20:51:26,487 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, ASSIGN 2024-12-11T20:51:26,488 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 
2024-12-11T20:51:26,488 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:51:26,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T20:51:26,639 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T20:51:26,643 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=334f3e62817613989e9752453a3409fa, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:26,644 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=543d1a3f52e9986ac920e4d97e5bcf0e, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:51:26,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, ASSIGN because future has completed 2024-12-11T20:51:26,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:26,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, ASSIGN because future has completed 2024-12-11T20:51:26,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:51:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T20:51:26,832 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,832 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 334f3e62817613989e9752453a3409fa, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:51:26,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
service=AccessControlService 2024-12-11T20:51:26,833 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:26,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:26,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,886 INFO [StoreOpener-334f3e62817613989e9752453a3409fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,887 INFO [StoreOpener-334f3e62817613989e9752453a3409fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 334f3e62817613989e9752453a3409fa columnFamilyName cf 2024-12-11T20:51:26,887 DEBUG [StoreOpener-334f3e62817613989e9752453a3409fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:26,888 INFO [StoreOpener-334f3e62817613989e9752453a3409fa-1 {}] regionserver.HStore(327): Store=334f3e62817613989e9752453a3409fa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:26,888 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,891 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
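The open-region entries above show the AccessControlService coprocessor (AccessController, priority 536870911) being registered on each new region, which is what enforces the ACLs written under /hbase/acl. In a secure setup such as this test cluster, the coprocessor is typically wired in through configuration; the following is a minimal sketch using the standard HBase property keys (only the access-control keys are shown, the rest of the secure configuration is omitted):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlConfSketch {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization and load AccessController on master, region servers and regions;
    // this is what produces the "System coprocessor ... AccessController loaded" log lines.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
```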
2024-12-11T20:51:26,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 543d1a3f52e9986ac920e4d97e5bcf0e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:51:26,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. service=AccessControlService 2024-12-11T20:51:26,892 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:26,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,892 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:26,893 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,893 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,893 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,893 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,893 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,897 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,907 INFO [StoreOpener-543d1a3f52e9986ac920e4d97e5bcf0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,910 INFO [StoreOpener-543d1a3f52e9986ac920e4d97e5bcf0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 543d1a3f52e9986ac920e4d97e5bcf0e columnFamilyName cf 2024-12-11T20:51:26,910 DEBUG [StoreOpener-543d1a3f52e9986ac920e4d97e5bcf0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:26,912 INFO [StoreOpener-543d1a3f52e9986ac920e4d97e5bcf0e-1 {}] regionserver.HStore(327): Store=543d1a3f52e9986ac920e4d97e5bcf0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:26,912 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,913 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,913 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,913 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,913 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,914 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:26,914 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 334f3e62817613989e9752453a3409fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64921538, jitterRate=-0.03259369730949402}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:26,914 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:26,915 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 334f3e62817613989e9752453a3409fa: Running coprocessor pre-open hook at 1733950286834Writing region info on filesystem at 1733950286834Initializing all the Stores at 1733950286885 (+51 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950286885Cleaning up temporary data from old regions at 1733950286893 (+8 ms)Running coprocessor post-open hooks at 1733950286914 (+21 ms)Region opened successfully at 1733950286915 (+1 ms) 2024-12-11T20:51:26,919 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa., pid=135, masterSystemTime=1733950286803 2024-12-11T20:51:26,920 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,922 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:26,923 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
2024-12-11T20:51:26,923 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=334f3e62817613989e9752453a3409fa, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:26,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:26,929 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:26,930 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 543d1a3f52e9986ac920e4d97e5bcf0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70709890, jitterRate=0.05365946888923645}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:26,930 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:26,930 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 543d1a3f52e9986ac920e4d97e5bcf0e: Running coprocessor pre-open hook at 1733950286893Writing region info on filesystem at 1733950286893Initializing all the Stores at 1733950286893Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950286893Cleaning up temporary data from old regions at 1733950286913 (+20 ms)Running coprocessor post-open hooks at 1733950286930 (+17 ms)Region opened successfully at 1733950286930 2024-12-11T20:51:26,936 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e., pid=136, masterSystemTime=1733950286803 2024-12-11T20:51:26,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-11T20:51:26,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244 in 283 msec 2024-12-11T20:51:26,945 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
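At this point both OpenRegionProcedures have reported back and the two regions are live on 5be53b084ac7,40781 and 5be53b084ac7,40355. A hedged sketch of how a client could confirm where the regions landed, using the standard RegionLocator API (method and class names below are illustrative of typical usage, not taken from the test):

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  // Prints one line per region: region name and the server currently hosting it.
  static void printLocations(Connection conn, TableName table) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```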
2024-12-11T20:51:26,945 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:26,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, ASSIGN in 458 msec 2024-12-11T20:51:26,952 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=543d1a3f52e9986ac920e4d97e5bcf0e, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:51:26,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:51:26,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-11T20:51:26,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820 in 309 msec 2024-12-11T20:51:26,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-12-11T20:51:26,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, ASSIGN in 480 msec 2024-12-11T20:51:26,975 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:51:26,975 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950286975"}]},"ts":"1733950286975"} 2024-12-11T20:51:26,984 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-11T20:51:26,985 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:51:26,986 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-11T20:51:26,994 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T20:51:27,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:27,025 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:27,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:27,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:27,034 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:27,039 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:27,039 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:27,039 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:27,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 601 msec 2024-12-11T20:51:27,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T20:51:27,076 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T20:51:27,079 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-11T20:51:27,079 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:27,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-11T20:51:27,091 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:27,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
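The entries up to this point trace CreateTableProcedure pid=132 for 'testtb-testExportFileSystemStateWithMergeRegion' through region assignment, the ACL write, and the final assignment check. As a hedged illustration only (this code is not taken from the test source; the configuration handling and the single split key are assumptions inferred from the two regions ",1" and "1," seen above), a minimal client-side sketch of the same create-table call using the public HBase Admin/TableDescriptorBuilder API might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // cluster settings assumed to be on the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single 'cf' family with one version, matching the descriptor printed in the region-open journal.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1).build());
      // One split key ("1") yields the two regions observed in the log: [,1) and [1,).
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);             // synchronous: returns once the create procedure finishes
    }
  }
}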
2024-12-11T20:51:27,091 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T20:51:27,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T20:51:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950287108 (current time:1733950287108). 2024-12-11T20:51:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:51:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T20:51:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12e13a9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:27,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:27,122 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:27,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:27,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:27,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74d99323, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:27,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:27,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-11T20:51:27,125 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:27,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@233d8f09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:27,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:27,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:27,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60124, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:27,139 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:27,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:27,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:27,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:27,140 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:51:27,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73ad3cf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:27,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:27,144 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:27,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:27,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:27,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5030e1b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:27,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:27,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:27,149 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:27,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64e96dfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:27,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:27,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:27,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:27,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:51:27,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:27,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:27,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:27,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:27,164 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T20:51:27,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:51:27,168 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:27,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T20:51:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T20:51:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T20:51:27,203 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:27,211 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:27,228 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T20:51:27,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742150_1326 (size=215) 2024-12-11T20:51:27,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742150_1326 (size=215) 2024-12-11T20:51:27,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742150_1326 (size=215) 2024-12-11T20:51:27,333 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:51:27,334 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa}] 2024-12-11T20:51:27,339 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:27,340 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T20:51:27,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-11T20:51:27,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:27,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 334f3e62817613989e9752453a3409fa: 2024-12-11T20:51:27,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T20:51:27,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:27,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:27,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:51:27,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-11T20:51:27,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:27,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 543d1a3f52e9986ac920e4d97e5bcf0e: 2024-12-11T20:51:27,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T20:51:27,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:27,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:27,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:51:27,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742151_1327 (size=86) 2024-12-11T20:51:27,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742151_1327 (size=86) 2024-12-11T20:51:27,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742151_1327 (size=86) 2024-12-11T20:51:27,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
2024-12-11T20:51:27,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-11T20:51:27,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-11T20:51:27,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:27,601 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:27,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa in 270 msec 2024-12-11T20:51:27,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742152_1328 (size=86) 2024-12-11T20:51:27,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742152_1328 (size=86) 2024-12-11T20:51:27,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742152_1328 (size=86) 2024-12-11T20:51:27,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
2024-12-11T20:51:27,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-11T20:51:27,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-11T20:51:27,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:27,624 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:27,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-11T20:51:27,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e in 294 msec 2024-12-11T20:51:27,631 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:27,633 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:27,634 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:27,634 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:27,635 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:27,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742153_1329 (size=597) 2024-12-11T20:51:27,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742153_1329 (size=597) 2024-12-11T20:51:27,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742153_1329 (size=597) 2024-12-11T20:51:27,777 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:27,800 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:27,801 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:27,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T20:51:27,803 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:27,804 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T20:51:27,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 637 msec 2024-12-11T20:51:28,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T20:51:28,313 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T20:51:28,318 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0900cf7a770f5aee45dde61370ad03994', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:28,320 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='114d8e13300578f85d05f63dbce3fbfb0', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:51:28,324 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 
'testtb-testExportFileSystemStateWithMergeRegion', row='2aaa61c76697344233b8fc2c797a4699b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:51:28,331 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3204fe275bc9aad7e7cbb00e47558f3e6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:51:28,341 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:28,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:28,355 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T20:51:28,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:28,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:28,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:28,362 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T20:51:28,389 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T20:51:28,399 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T20:51:28,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T20:51:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950288403 (current time:1733950288403). 
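The MasterRpcServices entries at 20:51:27,107 and 20:51:28,403 record two FLUSH-type snapshot requests (emptySnaptb0-... before the puts, snaptb0-... after the WAL-disabled puts logged just above) against the same table. As a sketch under stated assumptions — the open Admin handle comes from elsewhere (see the create-table sketch earlier), only the snapshot and table names are taken from the log, and the calls shown are the 2.x-style public Admin API rather than the test's own code — the client side of such a request could be:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeTestSnapshots {
  // 'admin' is an already-open Admin handle; snapshot and table names are copied from the log entries.
  static void takeSnapshots(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    // FLUSH-type snapshots, matching "type=FLUSH ttl=0" in the request entries.
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", table, SnapshotType.FLUSH);
    // ... data is written between the two requests (the WAL-disabled puts recorded above) ...
    admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion", table, SnapshotType.FLUSH);
  }
}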
2024-12-11T20:51:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:51:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T20:51:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2680df24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:28,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:28,405 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:28,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:28,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:28,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25929fd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:28,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:28,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,406 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42262, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:28,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d334b80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:28,408 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:28,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:28,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:28,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,410 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:51:28,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10b8d61a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:28,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:28,416 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:28,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:28,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:28,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593c3a29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:28,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:28,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,417 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:28,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7084100b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:28,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:28,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:28,421 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:51:28,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:28,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:28,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:28,427 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:28,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:28,427 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:28,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T20:51:28,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:51:28,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T20:51:28,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T20:51:28,430 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:28,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T20:51:28,432 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:28,435 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:51:28,435 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:28,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742154_1330 (size=210) 2024-12-11T20:51:28,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742154_1330 (size=210) 2024-12-11T20:51:28,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742154_1330 (size=210) 2024-12-11T20:51:28,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:51:28,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa}] 2024-12-11T20:51:28,471 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:28,472 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:28,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T20:51:28,559 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-11T20:51:28,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-11T20:51:28,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
2024-12-11T20:51:28,625 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 543d1a3f52e9986ac920e4d97e5bcf0e 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-11T20:51:28,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-11T20:51:28,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:28,627 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 334f3e62817613989e9752453a3409fa 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-11T20:51:28,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/.tmp/cf/b6c1817fd3fc463fa4eb27f19fd8b79f is 71, key is 1eb726a6c3ab56ea761331b91feb135d/cf:q/1733950288345/Put/seqid=0 2024-12-11T20:51:28,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/.tmp/cf/ab977b8e907244709bff076522be305b is 71, key is 0990d6a143f1830b35059d8bbec450f8/cf:q/1733950288341/Put/seqid=0 2024-12-11T20:51:28,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742155_1331 (size=8122) 2024-12-11T20:51:28,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742155_1331 (size=8122) 2024-12-11T20:51:28,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742155_1331 (size=8122) 2024-12-11T20:51:28,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742156_1332 (size=5492) 2024-12-11T20:51:28,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742156_1332 (size=5492) 2024-12-11T20:51:28,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742156_1332 (size=5492) 2024-12-11T20:51:28,707 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/.tmp/cf/ab977b8e907244709bff076522be305b 2024-12-11T20:51:28,722 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/.tmp/cf/ab977b8e907244709bff076522be305b as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b 2024-12-11T20:51:28,729 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b, entries=6, sequenceid=6, filesize=5.4 K 2024-12-11T20:51:28,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 543d1a3f52e9986ac920e4d97e5bcf0e in 111ms, sequenceid=6, compaction requested=false 2024-12-11T20:51:28,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 543d1a3f52e9986ac920e4d97e5bcf0e: 2024-12-11T20:51:28,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T20:51:28,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:28,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:28,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b] hfiles 2024-12-11T20:51:28,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:28,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742157_1333 (size=125) 2024-12-11T20:51:28,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742157_1333 (size=125) 2024-12-11T20:51:28,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742157_1333 (size=125) 2024-12-11T20:51:28,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T20:51:28,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
2024-12-11T20:51:28,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-11T20:51:28,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-11T20:51:28,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:28,771 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:28,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e in 306 msec 2024-12-11T20:51:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T20:51:29,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/.tmp/cf/b6c1817fd3fc463fa4eb27f19fd8b79f 2024-12-11T20:51:29,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/.tmp/cf/b6c1817fd3fc463fa4eb27f19fd8b79f as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f 2024-12-11T20:51:29,085 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f, entries=44, sequenceid=6, filesize=7.9 K 2024-12-11T20:51:29,086 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 334f3e62817613989e9752453a3409fa in 459ms, sequenceid=6, compaction requested=false 2024-12-11T20:51:29,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 334f3e62817613989e9752453a3409fa: 2024-12-11T20:51:29,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T20:51:29,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:29,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:29,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f] hfiles 2024-12-11T20:51:29,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742158_1334 (size=125) 2024-12-11T20:51:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742158_1334 (size=125) 2024-12-11T20:51:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742158_1334 (size=125) 2024-12-11T20:51:29,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
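Once both SnapshotRegionProcedure children (pid=141 and pid=142) report success, the parent procedure consolidates and verifies the manifest; the waiting client only observes the SNAPSHOT operation completing. A minimal, hypothetical follow-up check on the client side (assuming an open Admin handle as in the earlier sketch) could confirm the snapshot is now visible:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class SnapshotCheck {
      // Returns true if a completed snapshot with the given name is visible to the master.
      public static boolean exists(Admin admin, String name) throws IOException {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
      }
    }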
2024-12-11T20:51:29,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-11T20:51:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-11T20:51:29,098 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:29,098 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:29,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-11T20:51:29,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 334f3e62817613989e9752453a3409fa in 633 msec 2024-12-11T20:51:29,100 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:29,101 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:29,102 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:29,102 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:29,103 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:29,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742159_1335 (size=675) 2024-12-11T20:51:29,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742159_1335 (size=675) 2024-12-11T20:51:29,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742159_1335 (size=675) 2024-12-11T20:51:29,133 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:29,138 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:29,139 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:29,140 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:29,140 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T20:51:29,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 712 msec 2024-12-11T20:51:29,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T20:51:29,573 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T20:51:29,575 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:51:29,576 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:51:29,577 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T20:51:29,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:51:29,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:51:29,579 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T20:51:29,582 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:51:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:29,587 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:51:29,587 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:29,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-11T20:51:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T20:51:29,591 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:51:29,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742160_1336 (size=399) 2024-12-11T20:51:29,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742160_1336 (size=399) 2024-12-11T20:51:29,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742160_1336 (size=399) 2024-12-11T20:51:29,609 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4506d0dd660defcc79919c76c0e5f252, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:29,610 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 
fdb9fbd0435733d400ca45a61ab02e0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:29,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742162_1338 (size=85) 2024-12-11T20:51:29,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742162_1338 (size=85) 2024-12-11T20:51:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742162_1338 (size=85) 2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 4506d0dd660defcc79919c76c0e5f252, disabling compactions & flushes 2024-12-11T20:51:29,646 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. after waiting 0 ms 2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:29,646 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 
2024-12-11T20:51:29,646 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4506d0dd660defcc79919c76c0e5f252: Waiting for close lock at 1733950289646Disabling compacts and flushes for region at 1733950289646Disabling writes for close at 1733950289646Writing region close event to WAL at 1733950289646Closed at 1733950289646 2024-12-11T20:51:29,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742161_1337 (size=85) 2024-12-11T20:51:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742161_1337 (size=85) 2024-12-11T20:51:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742161_1337 (size=85) 2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing fdb9fbd0435733d400ca45a61ab02e0c, disabling compactions & flushes 2024-12-11T20:51:29,654 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. after waiting 0 ms 2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:29,654 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 
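The create request logged above declares a single 'cf' family and yields two regions split at row key '2' (STARTKEY '' to '2' and '2' to ''). As a sketch only, not the test's actual code path, an equivalent pre-split table could be created through the public descriptor builders roughly as follows; names and the handful of family options shown are copied from the log line, the rest are defaults:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536'
              .build());
          // One explicit split point at '2' produces the two regions seen in the log:
          // ('', '2') and ('2', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
          admin.createTable(table.build(), splitKeys);
        }
      }
    }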
2024-12-11T20:51:29,654 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for fdb9fbd0435733d400ca45a61ab02e0c: Waiting for close lock at 1733950289654Disabling compacts and flushes for region at 1733950289654Disabling writes for close at 1733950289654Writing region close event to WAL at 1733950289654Closed at 1733950289654 2024-12-11T20:51:29,655 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:51:29,656 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733950289655"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950289655"}]},"ts":"1733950289655"} 2024-12-11T20:51:29,656 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733950289655"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950289655"}]},"ts":"1733950289655"} 2024-12-11T20:51:29,659 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:51:29,660 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:51:29,660 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950289660"}]},"ts":"1733950289660"} 2024-12-11T20:51:29,662 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-11T20:51:29,663 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:51:29,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:51:29,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:51:29,664 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:51:29,664 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:51:29,664 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, ASSIGN}] 2024-12-11T20:51:29,666 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, ASSIGN 2024-12-11T20:51:29,666 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, ASSIGN 2024-12-11T20:51:29,667 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:51:29,667 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:51:29,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T20:51:29,817 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:51:29,817 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=4506d0dd660defcc79919c76c0e5f252, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:29,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=fdb9fbd0435733d400ca45a61ab02e0c, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:29,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, ASSIGN because future has completed 2024-12-11T20:51:29,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:29,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, ASSIGN because future has completed 2024-12-11T20:51:29,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:29,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T20:51:29,974 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:29,974 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:29,974 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 4506d0dd660defcc79919c76c0e5f252, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.', STARTKEY => '', ENDKEY => '2'} 2024-12-11T20:51:29,974 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => fdb9fbd0435733d400ca45a61ab02e0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.', STARTKEY => '2', ENDKEY => ''} 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 
service=AccessControlService 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. service=AccessControlService 2024-12-11T20:51:29,975 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:29,975 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,975 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,977 INFO [StoreOpener-4506d0dd660defcc79919c76c0e5f252-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,977 INFO [StoreOpener-fdb9fbd0435733d400ca45a61ab02e0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,978 INFO [StoreOpener-fdb9fbd0435733d400ca45a61ab02e0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fdb9fbd0435733d400ca45a61ab02e0c columnFamilyName cf 2024-12-11T20:51:29,978 INFO [StoreOpener-4506d0dd660defcc79919c76c0e5f252-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4506d0dd660defcc79919c76c0e5f252 columnFamilyName cf 2024-12-11T20:51:29,978 DEBUG [StoreOpener-4506d0dd660defcc79919c76c0e5f252-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:29,978 DEBUG [StoreOpener-fdb9fbd0435733d400ca45a61ab02e0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:29,979 INFO [StoreOpener-4506d0dd660defcc79919c76c0e5f252-1 {}] regionserver.HStore(327): Store=4506d0dd660defcc79919c76c0e5f252/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:29,979 INFO [StoreOpener-fdb9fbd0435733d400ca45a61ab02e0c-1 {}] regionserver.HStore(327): Store=fdb9fbd0435733d400ca45a61ab02e0c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:29,979 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,979 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,980 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,981 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,981 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:29,982 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:29,983 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:30,007 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:30,008 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 4506d0dd660defcc79919c76c0e5f252; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60167419, jitterRate=-0.10343559086322784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:30,008 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:30,008 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:30,009 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened fdb9fbd0435733d400ca45a61ab02e0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64533712, jitterRate=-0.03837275505065918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:30,009 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:30,009 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 4506d0dd660defcc79919c76c0e5f252: Running coprocessor pre-open hook at 1733950289975Writing region info on filesystem at 1733950289975Initializing all the Stores at 1733950289976 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950289976Cleaning up temporary data from old regions at 1733950289980 (+4 ms)Running coprocessor post-open hooks at 1733950290008 (+28 ms)Region opened successfully at 1733950290009 (+1 ms) 2024-12-11T20:51:30,009 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for fdb9fbd0435733d400ca45a61ab02e0c: Running coprocessor pre-open hook at 1733950289975Writing region info on filesystem at 1733950289975Initializing all the Stores at 1733950289976 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950289976Cleaning up temporary data from old regions at 1733950289981 (+5 ms)Running coprocessor post-open hooks at 1733950290009 (+28 ms)Region opened successfully at 1733950290009 2024-12-11T20:51:30,010 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252., pid=146, masterSystemTime=1733950289970 2024-12-11T20:51:30,010 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c., pid=147, masterSystemTime=1733950289972 2024-12-11T20:51:30,024 
DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:30,024 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:30,025 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:30,026 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:30,026 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=4506d0dd660defcc79919c76c0e5f252, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:30,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:30,029 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=fdb9fbd0435733d400ca45a61ab02e0c, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:30,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:30,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-12-11T20:51:30,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112 in 215 msec 2024-12-11T20:51:30,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, ASSIGN in 377 msec 2024-12-11T20:51:30,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-12-11T20:51:30,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244 in 216 msec 2024-12-11T20:51:30,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-11T20:51:30,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, ASSIGN in 381 msec 2024-12-11T20:51:30,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:51:30,056 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950290056"}]},"ts":"1733950290056"} 2024-12-11T20:51:30,062 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-11T20:51:30,063 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:51:30,063 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-11T20:51:30,066 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T20:51:30,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:30,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:30,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:30,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:30,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 550 msec 2024-12-11T20:51:30,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T20:51:30,213 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T20:51:30,215 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:30,219 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:51:30,221 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-11T20:51:30,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c] 2024-12-11T20:51:30,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c], force=true 2024-12-11T20:51:30,245 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c], force=true 2024-12-11T20:51:30,245 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c], force=true 2024-12-11T20:51:30,245 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c], force=true 2024-12-11T20:51:30,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T20:51:30,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, UNASSIGN}] 2024-12-11T20:51:30,255 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, UNASSIGN 2024-12-11T20:51:30,255 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, UNASSIGN 2024-12-11T20:51:30,256 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=fdb9fbd0435733d400ca45a61ab02e0c, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:30,256 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=4506d0dd660defcc79919c76c0e5f252, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:30,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, UNASSIGN because future has completed 2024-12-11T20:51:30,259 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:30,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:30,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, UNASSIGN because future has completed 2024-12-11T20:51:30,260 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:30,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T20:51:30,414 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:30,414 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T20:51:30,414 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing fdb9fbd0435733d400ca45a61ab02e0c, disabling compactions & flushes 2024-12-11T20:51:30,414 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:30,414 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:30,415 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. after waiting 0 ms 2024-12-11T20:51:30,415 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 
2024-12-11T20:51:30,415 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing fdb9fbd0435733d400ca45a61ab02e0c 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T20:51:30,423 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:30,423 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T20:51:30,424 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 4506d0dd660defcc79919c76c0e5f252, disabling compactions & flushes 2024-12-11T20:51:30,424 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:30,424 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 2024-12-11T20:51:30,424 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. after waiting 0 ms 2024-12-11T20:51:30,424 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 
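The closing regions above are each flushing a single small cell (dataSize=24 B) from family 'cf'; the earlier locator records show row '1' resolving to region 4506d0dd660defcc79919c76c0e5f252 and row '2' to fdb9fbd0435733d400ca45a61ab02e0c. A minimal sketch, assuming only the standard HBase Table/Put client API, of how such cells could be written (the table, family, and row keys are taken from the log; the qualifier and values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteMergeTableRows {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
      // Row '1' lands in the first region, row '2' in the second; family 'cf'
      // with an empty qualifier matches the flushed key seen in the log.
      table.put(new Put(Bytes.toBytes("1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v1")));
      table.put(new Put(Bytes.toBytes("2"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v2")));
    }
  }
}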
2024-12-11T20:51:30,424 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 4506d0dd660defcc79919c76c0e5f252 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T20:51:30,447 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/.tmp/cf/1f58c4bc26a84023b979500a7d66fc20 is 28, key is 2/cf:/1733950290220/Put/seqid=0 2024-12-11T20:51:30,470 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/.tmp/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 is 28, key is 1/cf:/1733950290216/Put/seqid=0 2024-12-11T20:51:30,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742163_1339 (size=4945) 2024-12-11T20:51:30,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742163_1339 (size=4945) 2024-12-11T20:51:30,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742163_1339 (size=4945) 2024-12-11T20:51:30,505 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/.tmp/cf/1f58c4bc26a84023b979500a7d66fc20 2024-12-11T20:51:30,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/.tmp/cf/1f58c4bc26a84023b979500a7d66fc20 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20 2024-12-11T20:51:30,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742164_1340 (size=4945) 2024-12-11T20:51:30,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742164_1340 (size=4945) 2024-12-11T20:51:30,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742164_1340 (size=4945) 2024-12-11T20:51:30,546 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/.tmp/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 2024-12-11T20:51:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T20:51:30,564 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T20:51:30,578 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for fdb9fbd0435733d400ca45a61ab02e0c in 163ms, sequenceid=5, compaction requested=false 2024-12-11T20:51:30,579 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-11T20:51:30,602 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/.tmp/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 2024-12-11T20:51:30,630 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T20:51:30,641 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 4506d0dd660defcc79919c76c0e5f252 in 217ms, sequenceid=5, compaction requested=false 2024-12-11T20:51:30,651 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:51:30,655 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:30,655 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. 2024-12-11T20:51:30,655 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for fdb9fbd0435733d400ca45a61ab02e0c: Waiting for close lock at 1733950290414Running coprocessor pre-close hooks at 1733950290414Disabling compacts and flushes for region at 1733950290414Disabling writes for close at 1733950290415 (+1 ms)Obtaining lock to block concurrent updates at 1733950290415Preparing flush snapshotting stores in fdb9fbd0435733d400ca45a61ab02e0c at 1733950290415Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733950290415Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c. at 1733950290419 (+4 ms)Flushing fdb9fbd0435733d400ca45a61ab02e0c/cf: creating writer at 1733950290419Flushing fdb9fbd0435733d400ca45a61ab02e0c/cf: appending metadata at 1733950290445 (+26 ms)Flushing fdb9fbd0435733d400ca45a61ab02e0c/cf: closing flushed file at 1733950290445Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39a24aa6: reopening flushed file at 1733950290533 (+88 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for fdb9fbd0435733d400ca45a61ab02e0c in 163ms, sequenceid=5, compaction requested=false at 1733950290578 (+45 ms)Writing region close event to WAL at 1733950290623 (+45 ms)Running coprocessor post-close hooks at 1733950290655 (+32 ms)Closed at 1733950290655 2024-12-11T20:51:30,674 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:30,676 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:51:30,677 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=fdb9fbd0435733d400ca45a61ab02e0c, regionState=CLOSED 2024-12-11T20:51:30,678 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:30,678 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. 
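The procedure records above trace MergeTableRegionsProcedure pid=148 unassigning, flushing, and closing the two parent regions requested by the client at 20:51:30,237. A minimal sketch, assuming the public HBase Admin API (Admin.mergeRegionsAsync), of how a client could issue such a forced merge; the encoded region names are taken from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Encoded names of the two parent regions, as reported in the log.
      byte[][] regions = new byte[][] {
          Bytes.toBytes("4506d0dd660defcc79919c76c0e5f252"),
          Bytes.toBytes("fdb9fbd0435733d400ca45a61ab02e0c")
      };
      // forcible=true permits merging regions even when they are not adjacent;
      // the resulting master-side procedure corresponds to pid=148 in the log.
      admin.mergeRegionsAsync(regions, true).get();
    }
  }
}

On the master side this request fans out exactly as the pids in the log show: the merge procedure spawns a TransitRegionStateProcedure (UNASSIGN) per parent, each of which spawns a CloseRegionProcedure on the hosting region server, before the merged region is created and assigned.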
2024-12-11T20:51:30,678 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 4506d0dd660defcc79919c76c0e5f252: Waiting for close lock at 1733950290424Running coprocessor pre-close hooks at 1733950290424Disabling compacts and flushes for region at 1733950290424Disabling writes for close at 1733950290424Obtaining lock to block concurrent updates at 1733950290424Preparing flush snapshotting stores in 4506d0dd660defcc79919c76c0e5f252 at 1733950290424Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733950290424Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252. at 1733950290439 (+15 ms)Flushing 4506d0dd660defcc79919c76c0e5f252/cf: creating writer at 1733950290439Flushing 4506d0dd660defcc79919c76c0e5f252/cf: appending metadata at 1733950290470 (+31 ms)Flushing 4506d0dd660defcc79919c76c0e5f252/cf: closing flushed file at 1733950290470Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56551dee: reopening flushed file at 1733950290581 (+111 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 4506d0dd660defcc79919c76c0e5f252 in 217ms, sequenceid=5, compaction requested=false at 1733950290641 (+60 ms)Writing region close event to WAL at 1733950290653 (+12 ms)Running coprocessor post-close hooks at 1733950290678 (+25 ms)Closed at 1733950290678 2024-12-11T20:51:30,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:30,698 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:30,706 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=4506d0dd660defcc79919c76c0e5f252, regionState=CLOSED 2024-12-11T20:51:30,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:30,710 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0006_000001 (auth:SIMPLE) from 127.0.0.1:49058 2024-12-11T20:51:30,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-11T20:51:30,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure fdb9fbd0435733d400ca45a61ab02e0c, server=5be53b084ac7,40781,1733949995244 in 437 msec 2024-12-11T20:51:30,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdb9fbd0435733d400ca45a61ab02e0c, UNASSIGN in 457 msec 2024-12-11T20:51:30,713 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-11T20:51:30,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 4506d0dd660defcc79919c76c0e5f252, server=5be53b084ac7,39865,1733949995112 in 451 msec 2024-12-11T20:51:30,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-11T20:51:30,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4506d0dd660defcc79919c76c0e5f252, UNASSIGN in 459 msec 2024-12-11T20:51:30,738 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000001/launch_container.sh] 2024-12-11T20:51:30,738 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000001/container_tokens] 2024-12-11T20:51:30,738 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0006/container_1733950008312_0006_01_000001/sysfs] 2024-12-11T20:51:30,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742165_1341 (size=84) 2024-12-11T20:51:30,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742165_1341 (size=84) 2024-12-11T20:51:30,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742165_1341 (size=84) 2024-12-11T20:51:30,793 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:30,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742166_1342 (size=20) 2024-12-11T20:51:30,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742166_1342 (size=20) 2024-12-11T20:51:30,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742166_1342 (size=20) 2024-12-11T20:51:30,831 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:30,850 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742167_1343 (size=21) 2024-12-11T20:51:30,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742167_1343 (size=21) 2024-12-11T20:51:30,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742167_1343 (size=21) 2024-12-11T20:51:30,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742168_1344 (size=84) 2024-12-11T20:51:30,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742168_1344 (size=84) 2024-12-11T20:51:30,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742168_1344 (size=84) 2024-12-11T20:51:30,858 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:30,869 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-11T20:51:30,872 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289581.4506d0dd660defcc79919c76c0e5f252.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:30,872 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733950289581.fdb9fbd0435733d400ca45a61ab02e0c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:30,872 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:30,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T20:51:30,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, ASSIGN}] 2024-12-11T20:51:30,878 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
region=b8b7662a0229049c109d58ebdbba07cf, ASSIGN 2024-12-11T20:51:30,883 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, ASSIGN; state=MERGED, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:51:31,035 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T20:51:31,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=b8b7662a0229049c109d58ebdbba07cf, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:31,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, ASSIGN because future has completed 2024-12-11T20:51:31,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:31,202 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:31,202 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => b8b7662a0229049c109d58ebdbba07cf, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:51:31,202 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. service=AccessControlService 2024-12-11T20:51:31,202 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:51:31,202 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,202 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:31,203 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,203 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,213 INFO [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,215 INFO [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b8b7662a0229049c109d58ebdbba07cf columnFamilyName cf 2024-12-11T20:51:31,216 DEBUG [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:31,253 DEBUG [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/0ce3be90ab4f45be85a00b70b7d6c9f3.4506d0dd660defcc79919c76c0e5f252->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3-top 2024-12-11T20:51:31,270 DEBUG [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/1f58c4bc26a84023b979500a7d66fc20.fdb9fbd0435733d400ca45a61ab02e0c->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20-top 2024-12-11T20:51:31,271 INFO [StoreOpener-b8b7662a0229049c109d58ebdbba07cf-1 {}] regionserver.HStore(327): Store=b8b7662a0229049c109d58ebdbba07cf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:31,271 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,275 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,283 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,284 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,284 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,291 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,293 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened b8b7662a0229049c109d58ebdbba07cf; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74252249, jitterRate=0.10644473135471344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:31,293 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,294 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for b8b7662a0229049c109d58ebdbba07cf: Running coprocessor pre-open hook at 1733950291204Writing region info on filesystem at 1733950291204Initializing all the Stores at 1733950291209 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950291209Cleaning up temporary data from old regions at 1733950291285 (+76 ms)Running coprocessor post-open hooks at 1733950291293 (+8 ms)Region opened successfully at 1733950291294 (+1 ms) 2024-12-11T20:51:31,315 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf., pid=154, masterSystemTime=1733950291197 2024-12-11T20:51:31,315 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.,because compaction is disabled. 2024-12-11T20:51:31,323 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:31,323 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:31,327 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=b8b7662a0229049c109d58ebdbba07cf, regionState=OPEN, openSeqNum=9, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:31,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:31,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-11T20:51:31,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112 in 300 msec 2024-12-11T20:51:31,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-11T20:51:31,357 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, ASSIGN in 473 msec 2024-12-11T20:51:31,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4506d0dd660defcc79919c76c0e5f252, fdb9fbd0435733d400ca45a61ab02e0c], force=true in 1.1170 sec 2024-12-11T20:51:31,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T20:51:31,384 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T20:51:31,385 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T20:51:31,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950291385 (current time:1733950291385). 2024-12-11T20:51:31,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:51:31,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-11T20:51:31,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59c87980, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:31,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:31,396 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:31,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:31,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:31,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f80ddd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:31,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:31,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,404 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:31,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d79352, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:31,408 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:31,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:31,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:31,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,413 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:51:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57c98d7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:31,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:31,420 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0eab7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:31,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:31,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,423 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:31,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c05a3c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:31,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:31,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:31,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:31,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
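The records above show the master validating the snapshot request: filling in the creation time, TTL, version, and owner, and reading the table ACL so it can be carried into the snapshot description. A minimal sketch, assuming the public HBase Admin API, of how a client could request such a FLUSH-type snapshot (the snapshot and table names are taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: memstores are flushed first, then the resulting
      // store files are referenced by the snapshot manifest.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}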
2024-12-11T20:51:31,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:31,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:31,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:31,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:31,438 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:31,438 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:31,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T20:51:31,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:51:31,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T20:51:31,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T20:51:31,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T20:51:31,449 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:31,450 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:31,462 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:31,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742169_1345 (size=216) 2024-12-11T20:51:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742169_1345 (size=216) 2024-12-11T20:51:31,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742169_1345 (size=216) 2024-12-11T20:51:31,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:51:31,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b7662a0229049c109d58ebdbba07cf}] 2024-12-11T20:51:31,544 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T20:51:31,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-11T20:51:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for b8b7662a0229049c109d58ebdbba07cf: 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/0ce3be90ab4f45be85a00b70b7d6c9f3.4506d0dd660defcc79919c76c0e5f252->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3-top, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/1f58c4bc26a84023b979500a7d66fc20.fdb9fbd0435733d400ca45a61ab02e0c->hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20-top] hfiles 2024-12-11T20:51:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/0ce3be90ab4f45be85a00b70b7d6c9f3.4506d0dd660defcc79919c76c0e5f252 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/1f58c4bc26a84023b979500a7d66fc20.fdb9fbd0435733d400ca45a61ab02e0c for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742170_1346 (size=269) 2024-12-11T20:51:31,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742170_1346 (size=269) 2024-12-11T20:51:31,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742170_1346 (size=269) 2024-12-11T20:51:31,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 
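The two "Adding reference for file" entries above are not ordinary hfiles: the single region b8b7662a0229049c109d58ebdbba07cf of testtb-testExportFileSystemStateWithMergeRegion-1 was produced by merging regions 4506d0dd660defcc79919c76c0e5f252 and fdb9fbd0435733d400ca45a61ab02e0c, so its column family still holds "-top" reference files that point back into the parents' directories, and the snapshot manifest records those references as-is. Further down, CatalogJanitor defers cleanup of the two parents "because references still exist in merged region" for the same reason. A hedged sketch of the usual way such references go away, assuming an Admin handle obtained as in the earlier sketch: a major compaction rewrites them into self-contained store files, after which the janitor can archive the parents.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CompactMergedRegionSketch {
  // Assumes 'admin' was obtained from a Connection as in the earlier sketch.
  static void compactAwayReferences(Admin admin) throws Exception {
    // Major compaction rewrites the "-top" reference files of the merged region into
    // ordinary hfiles; the request itself is asynchronous on the server side.
    admin.majorCompact(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
  }
}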
2024-12-11T20:51:31,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-11T20:51:31,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-11T20:51:31,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,743 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:31,743 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:51:31,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-11T20:51:31,751 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b8b7662a0229049c109d58ebdbba07cf in 204 msec 2024-12-11T20:51:31,751 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:31,753 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:31,754 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:31,755 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,756 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T20:51:31,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
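The repeated "Checking to see if procedure is done pid=155" entries are the synchronous client polling the master for the SnapshotProcedure's completion; it finishes just below, at which point the snapshot has been moved out of .tmp and is visible under .hbase-snapshot. A minimal sketch of confirming that from a client, assuming the same Admin handle as above; the exact-match name check is the only assumption:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  // Assumes 'admin' was obtained from a Connection as in the earlier sketch.
  static void confirmSnapshot(Admin admin) throws Exception {
    for (SnapshotDescription snapshot : admin.listSnapshots()) {
      if (snapshot.getName().equals("snaptb0-testExportFileSystemStateWithMergeRegion-1")) {
        System.out.println("snapshot is visible: " + snapshot.getName());
      }
    }
  }
}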
2024-12-11T20:51:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742171_1347 (size=670) 2024-12-11T20:51:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742171_1347 (size=670) 2024-12-11T20:51:31,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742171_1347 (size=670) 2024-12-11T20:51:31,827 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:31,837 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:31,838 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:31,840 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:31,840 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T20:51:31,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 397 msec 2024-12-11T20:51:32,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T20:51:32,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T20:51:32,074 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073 2024-12-11T20:51:32,074 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, 
tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:32,119 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:32,119 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:32,121 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:51:32,128 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:32,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742172_1348 (size=670) 2024-12-11T20:51:32,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742172_1348 (size=670) 2024-12-11T20:51:32,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742172_1348 (size=670) 2024-12-11T20:51:32,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742173_1349 (size=216) 2024-12-11T20:51:32,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742173_1349 (size=216) 2024-12-11T20:51:32,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742173_1349 (size=216) 2024-12-11T20:51:32,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:32,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:32,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-10935019014098677693.jar 2024-12-11T20:51:33,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-11199884584998864603.jar 2024-12-11T20:51:33,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:51:33,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:51:33,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:51:33,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:51:33,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:51:33,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:51:33,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:51:33,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:51:33,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:51:33,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:51:33,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:51:33,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:51:33,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:33,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:33,571 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:33,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:33,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:51:33,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:33,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:51:33,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742174_1350 (size=131440) 2024-12-11T20:51:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742174_1350 (size=131440) 2024-12-11T20:51:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742174_1350 (size=131440) 2024-12-11T20:51:33,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742175_1351 (size=4188619) 2024-12-11T20:51:33,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742175_1351 (size=4188619) 2024-12-11T20:51:33,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742175_1351 (size=4188619) 2024-12-11T20:51:33,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742176_1352 (size=1323991) 2024-12-11T20:51:33,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742176_1352 (size=1323991) 2024-12-11T20:51:33,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742176_1352 (size=1323991) 2024-12-11T20:51:33,979 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:33,979 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T20:51:33,980 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:33,980 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-11T20:51:33,980 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T20:51:34,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742177_1353 (size=903544) 2024-12-11T20:51:34,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742177_1353 (size=903544) 2024-12-11T20:51:34,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742177_1353 (size=903544) 2024-12-11T20:51:34,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742178_1354 (size=8360360) 2024-12-11T20:51:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742178_1354 (size=8360360) 2024-12-11T20:51:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742178_1354 (size=8360360) 2024-12-11T20:51:34,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742179_1355 (size=6426228) 2024-12-11T20:51:34,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742179_1355 (size=6426228) 2024-12-11T20:51:34,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742179_1355 (size=6426228) 2024-12-11T20:51:34,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742180_1356 (size=1877034) 2024-12-11T20:51:34,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742180_1356 (size=1877034) 2024-12-11T20:51:34,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742180_1356 (size=1877034) 2024-12-11T20:51:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742181_1357 (size=77835) 2024-12-11T20:51:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742181_1357 (size=77835) 2024-12-11T20:51:34,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742181_1357 (size=77835) 
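Everything from "HDFS export destination path" onward is the ExportSnapshot tool at work: it verifies the source snapshot, copies the snapshot manifest into the target tree, resolves which jar each referenced class lives in (the long run of TableMapReduceUtil "For class ..., using jar ..." lines above, which become the MapReduce job's dependency jars), and then submits a MapReduce job that copies the hfiles. A hedged sketch of driving the same tool programmatically; the -snapshot and -copy-to values are copied from this log, and to my understanding the documented shell form, hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot with the same arguments, is equivalent:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies snapshot metadata and hfiles to the target filesystem via a MapReduce job,
    // mirroring the sequence logged above (manifest copy, jar staging, job submission).
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(),
        new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073"
        });
    System.exit(exitCode);
  }
}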
2024-12-11T20:51:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742182_1358 (size=30949) 2024-12-11T20:51:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742182_1358 (size=30949) 2024-12-11T20:51:34,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742182_1358 (size=30949) 2024-12-11T20:51:35,006 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T20:51:35,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742183_1359 (size=1597213) 2024-12-11T20:51:35,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742183_1359 (size=1597213) 2024-12-11T20:51:35,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742183_1359 (size=1597213) 2024-12-11T20:51:35,191 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T20:51:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742184_1360 (size=4695811) 2024-12-11T20:51:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742184_1360 (size=4695811) 2024-12-11T20:51:35,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742184_1360 (size=4695811) 2024-12-11T20:51:35,319 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T20:51:35,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742185_1361 (size=232957) 2024-12-11T20:51:35,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742185_1361 (size=232957) 2024-12-11T20:51:35,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742185_1361 (size=232957) 2024-12-11T20:51:35,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742186_1362 (size=127628) 2024-12-11T20:51:35,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742186_1362 (size=127628) 2024-12-11T20:51:35,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to 
blk_1073742186_1362 (size=127628) 2024-12-11T20:51:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742187_1363 (size=20406) 2024-12-11T20:51:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742187_1363 (size=20406) 2024-12-11T20:51:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742187_1363 (size=20406) 2024-12-11T20:51:35,611 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-11T20:51:35,615 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=14, reuseRatio=58.33% 2024-12-11T20:51:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742188_1364 (size=5175431) 2024-12-11T20:51:35,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742188_1364 (size=5175431) 2024-12-11T20:51:35,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742188_1364 (size=5175431) 2024-12-11T20:51:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742189_1365 (size=217634) 2024-12-11T20:51:35,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742189_1365 (size=217634) 2024-12-11T20:51:35,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742189_1365 (size=217634) 2024-12-11T20:51:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742190_1366 (size=443172) 2024-12-11T20:51:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742190_1366 (size=443172) 2024-12-11T20:51:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742190_1366 (size=443172) 2024-12-11T20:51:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742191_1367 (size=1832290) 2024-12-11T20:51:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742191_1367 (size=1832290) 2024-12-11T20:51:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742191_1367 (size=1832290) 2024-12-11T20:51:36,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742192_1368 (size=322274) 2024-12-11T20:51:36,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742192_1368 (size=322274) 2024-12-11T20:51:36,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742192_1368 (size=322274) 2024-12-11T20:51:36,710 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:51:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742193_1369 (size=503880) 2024-12-11T20:51:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742193_1369 (size=503880) 2024-12-11T20:51:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742193_1369 (size=503880) 2024-12-11T20:51:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742194_1370 (size=29229) 2024-12-11T20:51:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742194_1370 (size=29229) 2024-12-11T20:51:36,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742194_1370 (size=29229) 2024-12-11T20:51:37,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742195_1371 (size=24096) 2024-12-11T20:51:37,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742195_1371 (size=24096) 2024-12-11T20:51:37,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742195_1371 (size=24096) 2024-12-11T20:51:37,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742196_1372 (size=111872) 2024-12-11T20:51:37,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742196_1372 (size=111872) 2024-12-11T20:51:37,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742196_1372 (size=111872) 2024-12-11T20:51:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742197_1373 (size=45609) 2024-12-11T20:51:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742197_1373 (size=45609) 2024-12-11T20:51:37,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742197_1373 (size=45609) 2024-12-11T20:51:37,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742198_1374 (size=136454) 2024-12-11T20:51:37,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742198_1374 (size=136454) 2024-12-11T20:51:37,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742198_1374 (size=136454) 2024-12-11T20:51:37,749 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:51:37,763 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-11T20:51:37,767 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-11T20:51:37,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-11T20:51:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742199_1375 (size=481) 2024-12-11T20:51:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742199_1375 (size=481) 2024-12-11T20:51:37,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742199_1375 (size=481) 2024-12-11T20:51:37,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742200_1376 (size=21) 2024-12-11T20:51:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742200_1376 (size=21) 2024-12-11T20:51:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742200_1376 (size=21) 2024-12-11T20:51:38,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742201_1377 (size=304061) 2024-12-11T20:51:38,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742201_1377 (size=304061) 2024-12-11T20:51:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742201_1377 (size=304061) 2024-12-11T20:51:38,155 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:51:38,155 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:51:38,242 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:60072 2024-12-11T20:51:40,176 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T20:51:40,181 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T20:51:40,187 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T20:51:41,977 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T20:51:41,977 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion-1 because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T20:51:41,977 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 543d1a3f52e9986ac920e4d97e5bcf0e changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:51:41,977 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 334f3e62817613989e9752453a3409fa changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:51:41,984 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T20:51:41,984 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(258): Cleaning merged region {ENCODED => b8b7662a0229049c109d58ebdbba07cf, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.', STARTKEY => '', ENDKEY => ''} 2024-12-11T20:51:41,986 DEBUG [master/5be53b084ac7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:41,987 DEBUG [master/5be53b084ac7:0.Chore.1 {}] janitor.CatalogJanitor(283): Deferring cleanup up of 2 parents of merged region b8b7662a0229049c109d58ebdbba07cf, because references still exist in merged region or we encountered an exception in checking 2024-12-11T20:51:42,108 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 154962 ms 2024-12-11T20:51:43,502 DEBUG [master/5be53b084ac7:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-11T20:51:43,503 INFO [master/5be53b084ac7:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-11T20:51:43,503 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
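The chore output that follows is the StochasticLoadBalancer evaluating the "default" RSGroup: it builds the cluster state (three servers on one host and one rack), computes a weighted imbalance, and, as shown just below, skips balancing because that imbalance stays under hbase.master.balancer.stochastic.minCostNeedBalance (0.025 here). A minimal sketch of how that threshold could be tightened for an experiment, using only the property name quoted in the log; the value 0.001 is arbitrary and not taken from this run, and a real deployment would set it in hbase-site.xml rather than in code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lowering the threshold makes the StochasticLoadBalancer act on smaller imbalances.
    conf.setDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 0.001);
    System.out.println("minCostNeedBalance = "
        + conf.getDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025));
  }
}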
2024-12-11T20:51:43,503 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:51:43,505 DEBUG [master/5be53b084ac7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:43,509 DEBUG [master/5be53b084ac7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:51:43,512 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:51:43,512 INFO [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:51:43,512 INFO [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:51:43,512 INFO [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:51:43,513 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-11T20:51:43,518 INFO [master/5be53b084ac7:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.014107282920641593 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8477639612815459, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8648601852843434, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-11T20:51:43,518 DEBUG [master/5be53b084ac7:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-11T20:51:43,862 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:36546 2024-12-11T20:51:44,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742202_1378 (size=349759) 2024-12-11T20:51:44,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742202_1378 (size=349759) 2024-12-11T20:51:44,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742202_1378 (size=349759) 2024-12-11T20:51:46,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:39934 2024-12-11T20:51:46,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:45978 2024-12-11T20:51:46,857 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:51:49,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742203_1379 (size=4945) 2024-12-11T20:51:49,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742203_1379 (size=4945) 2024-12-11T20:51:49,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742203_1379 (size=4945) 2024-12-11T20:51:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742205_1381 (size=4945) 2024-12-11T20:51:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742205_1381 (size=4945) 2024-12-11T20:51:50,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742205_1381 (size=4945) 2024-12-11T20:51:50,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742204_1380 (size=22231) 2024-12-11T20:51:50,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46705 is added to blk_1073742204_1380 (size=22231) 2024-12-11T20:51:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742204_1380 (size=22231) 2024-12-11T20:51:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742206_1382 (size=482) 2024-12-11T20:51:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742206_1382 (size=482) 2024-12-11T20:51:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742206_1382 (size=482) 2024-12-11T20:51:50,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742207_1383 (size=22231) 2024-12-11T20:51:50,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742207_1383 (size=22231) 2024-12-11T20:51:50,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742207_1383 (size=22231) 2024-12-11T20:51:50,837 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000003/launch_container.sh] 2024-12-11T20:51:50,837 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000003/container_tokens] 2024-12-11T20:51:50,837 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000003/sysfs] 2024-12-11T20:51:50,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742208_1384 (size=349759) 2024-12-11T20:51:50,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742208_1384 (size=349759) 2024-12-11T20:51:50,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742208_1384 (size=349759) 2024-12-11T20:51:50,869 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:55354 2024-12-11T20:51:50,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:37046 2024-12-11T20:51:52,495 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): 
Finalize the Snapshot Export 2024-12-11T20:51:52,496 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:51:52,511 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,511 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:51:52,511 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:51:52,512 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,512 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T20:51:52,512 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T20:51:52,512 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T20:51:52,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950292073/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T20:51:52,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T20:51:52,536 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950312536"}]},"ts":"1733950312536"} 2024-12-11T20:51:52,538 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-11T20:51:52,538 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-11T20:51:52,539 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-11T20:51:52,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, UNASSIGN}] 2024-12-11T20:51:52,544 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, UNASSIGN 2024-12-11T20:51:52,545 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=b8b7662a0229049c109d58ebdbba07cf, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:52,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, UNASSIGN because future has completed 2024-12-11T20:51:52,547 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:52,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:52,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T20:51:52,700 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:52,700 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:51:52,701 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing b8b7662a0229049c109d58ebdbba07cf, disabling compactions & flushes 2024-12-11T20:51:52,701 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 
2024-12-11T20:51:52,701 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:52,701 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. after waiting 0 ms 2024-12-11T20:51:52,701 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:52,736 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-11T20:51:52,737 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:52,737 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf. 2024-12-11T20:51:52,737 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for b8b7662a0229049c109d58ebdbba07cf: Waiting for close lock at 1733950312701Running coprocessor pre-close hooks at 1733950312701Disabling compacts and flushes for region at 1733950312701Disabling writes for close at 1733950312701Writing region close event to WAL at 1733950312718 (+17 ms)Running coprocessor post-close hooks at 1733950312737 (+19 ms)Closed at 1733950312737 2024-12-11T20:51:52,739 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:52,739 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=b8b7662a0229049c109d58ebdbba07cf, regionState=CLOSED 2024-12-11T20:51:52,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:52,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-11T20:51:52,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure b8b7662a0229049c109d58ebdbba07cf, server=5be53b084ac7,39865,1733949995112 in 197 msec 2024-12-11T20:51:52,749 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-11T20:51:52,749 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=b8b7662a0229049c109d58ebdbba07cf, UNASSIGN in 205 msec 2024-12-11T20:51:52,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-11T20:51:52,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 211 msec 2024-12-11T20:51:52,754 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950312754"}]},"ts":"1733950312754"} 2024-12-11T20:51:52,756 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-11T20:51:52,756 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-11T20:51:52,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 225 msec 2024-12-11T20:51:52,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T20:51:52,853 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T20:51:52,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,855 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,856 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,860 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:52,860 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:52,860 DEBUG [HFileArchiver-18 {}] 
backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:52,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,862 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/recovered.edits] 2024-12-11T20:51:52,863 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/recovered.edits] 2024-12-11T20:51:52,863 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/recovered.edits] 2024-12-11T20:51:52,868 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/cf/1f58c4bc26a84023b979500a7d66fc20 2024-12-11T20:51:52,890 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/1f58c4bc26a84023b979500a7d66fc20.fdb9fbd0435733d400ca45a61ab02e0c to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/1f58c4bc26a84023b979500a7d66fc20.fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:52,890 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/0ce3be90ab4f45be85a00b70b7d6c9f3.4506d0dd660defcc79919c76c0e5f252 to 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/cf/0ce3be90ab4f45be85a00b70b7d6c9f3.4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:52,891 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/cf/0ce3be90ab4f45be85a00b70b7d6c9f3 2024-12-11T20:51:52,892 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/recovered.edits/8.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c/recovered.edits/8.seqid 2024-12-11T20:51:52,893 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdb9fbd0435733d400ca45a61ab02e0c 2024-12-11T20:51:52,894 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/recovered.edits/12.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf/recovered.edits/12.seqid 2024-12-11T20:51:52,894 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/b8b7662a0229049c109d58ebdbba07cf 2024-12-11T20:51:52,898 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/recovered.edits/8.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252/recovered.edits/8.seqid 2024-12-11T20:51:52,898 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4506d0dd660defcc79919c76c0e5f252 2024-12-11T20:51:52,898 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-11T20:51:52,900 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,904 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-11T20:51:52,906 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-11T20:51:52,908 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,908 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-11T20:51:52,908 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950312908"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:52,910 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-11T20:51:52,910 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b8b7662a0229049c109d58ebdbba07cf, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T20:51:52,910 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-11T20:51:52,911 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950312910"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:52,913 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-11T20:51:52,914 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 61 msec 2024-12-11T20:51:52,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,919 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,920 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T20:51:52,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T20:51:52,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T20:51:52,921 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:52,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 
\x02 \x03 \x04 2024-12-11T20:51:52,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-11T20:51:52,930 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:52,930 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T20:51:52,931 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:52,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:52,931 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T20:51:52,935 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950312935"}]},"ts":"1733950312935"} 2024-12-11T20:51:52,937 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-11T20:51:52,937 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-11T20:51:52,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-11T20:51:52,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, 
region=334f3e62817613989e9752453a3409fa, UNASSIGN}] 2024-12-11T20:51:52,940 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, UNASSIGN 2024-12-11T20:51:52,940 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, UNASSIGN 2024-12-11T20:51:52,941 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=334f3e62817613989e9752453a3409fa, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:52,942 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=543d1a3f52e9986ac920e4d97e5bcf0e, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:51:52,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, UNASSIGN because future has completed 2024-12-11T20:51:52,943 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:52,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:52,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, UNASSIGN because future has completed 2024-12-11T20:51:52,944 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:51:52,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:51:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T20:51:53,096 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:53,096 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:51:53,097 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 334f3e62817613989e9752453a3409fa, disabling compactions & flushes 2024-12-11T20:51:53,097 INFO 
[RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:53,097 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:53,097 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. after waiting 0 ms 2024-12-11T20:51:53,097 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 2024-12-11T20:51:53,098 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:53,098 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:51:53,099 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 543d1a3f52e9986ac920e4d97e5bcf0e, disabling compactions & flushes 2024-12-11T20:51:53,099 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:53,099 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:53,099 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. after waiting 0 ms 2024-12-11T20:51:53,099 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 
2024-12-11T20:51:53,106 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:51:53,107 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:53,107 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e. 2024-12-11T20:51:53,107 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 543d1a3f52e9986ac920e4d97e5bcf0e: Waiting for close lock at 1733950313098Running coprocessor pre-close hooks at 1733950313098Disabling compacts and flushes for region at 1733950313099 (+1 ms)Disabling writes for close at 1733950313099Writing region close event to WAL at 1733950313100 (+1 ms)Running coprocessor post-close hooks at 1733950313107 (+7 ms)Closed at 1733950313107 2024-12-11T20:51:53,109 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:53,109 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=543d1a3f52e9986ac920e4d97e5bcf0e, regionState=CLOSED 2024-12-11T20:51:53,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:51:53,111 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:51:53,112 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:51:53,112 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa. 
2024-12-11T20:51:53,112 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 334f3e62817613989e9752453a3409fa: Waiting for close lock at 1733950313097Running coprocessor pre-close hooks at 1733950313097Disabling compacts and flushes for region at 1733950313097Disabling writes for close at 1733950313097Writing region close event to WAL at 1733950313098 (+1 ms)Running coprocessor post-close hooks at 1733950313112 (+14 ms)Closed at 1733950313112 2024-12-11T20:51:53,114 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 334f3e62817613989e9752453a3409fa 2024-12-11T20:51:53,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=334f3e62817613989e9752453a3409fa, regionState=CLOSED 2024-12-11T20:51:53,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-11T20:51:53,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 543d1a3f52e9986ac920e4d97e5bcf0e, server=5be53b084ac7,40355,1733949994820 in 167 msec 2024-12-11T20:51:53,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:53,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=543d1a3f52e9986ac920e4d97e5bcf0e, UNASSIGN in 176 msec 2024-12-11T20:51:53,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-11T20:51:53,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 334f3e62817613989e9752453a3409fa, server=5be53b084ac7,40781,1733949995244 in 176 msec 2024-12-11T20:51:53,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-11T20:51:53,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=334f3e62817613989e9752453a3409fa, UNASSIGN in 188 msec 2024-12-11T20:51:53,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-11T20:51:53,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 192 msec 2024-12-11T20:51:53,133 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950313132"}]},"ts":"1733950313132"} 2024-12-11T20:51:53,134 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-11T20:51:53,134 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-11T20:51:53,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 204 msec 2024-12-11T20:51:53,170 INFO [regionserver/5be53b084ac7:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. because 92982b2478254451b467ff12e9fe4706/l has an old edit so flush to free WALs after random delay 220252 ms 2024-12-11T20:51:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T20:51:53,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T20:51:53,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,258 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,260 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,262 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa 2024-12-11T20:51:53,262 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:53,263 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/recovered.edits] 2024-12-11T20:51:53,263 DEBUG [HFileArchiver-16 {}] 
backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/recovered.edits] 2024-12-11T20:51:53,285 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/cf/b6c1817fd3fc463fa4eb27f19fd8b79f 2024-12-11T20:51:53,285 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/cf/ab977b8e907244709bff076522be305b 2024-12-11T20:51:53,287 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa/recovered.edits/9.seqid 2024-12-11T20:51:53,288 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e/recovered.edits/9.seqid 2024-12-11T20:51:53,288 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/334f3e62817613989e9752453a3409fa 2024-12-11T20:51:53,288 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithMergeRegion/543d1a3f52e9986ac920e4d97e5bcf0e 2024-12-11T20:51:53,288 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-11T20:51:53,290 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,292 WARN [PEWorker-2 {}] 
procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-11T20:51:53,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,294 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-11T20:51:53,295 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T20:51:53,295 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T20:51:53,295 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T20:51:53,295 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T20:51:53,296 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,296 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
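The pid=157 through pid=168 entries above trace client-driven DISABLE and DELETE operations for testtb-testExportFileSystemStateWithMergeRegion-1 and testtb-testExportFileSystemStateWithMergeRegion through their DisableTableProcedure and DeleteTableProcedure chains. Below is a minimal client-side sketch, assuming a standard Connection, of the Admin calls that typically trigger such procedures; it is illustrative only and is not code from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTablesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (String name : new String[] {
              "testtb-testExportFileSystemStateWithMergeRegion-1",
              "testtb-testExportFileSystemStateWithMergeRegion"}) {
            TableName table = TableName.valueOf(name);
            admin.disableTable(table); // drives a DisableTableProcedure on the master
            admin.deleteTable(table);  // drives a DeleteTableProcedure on the master
          }
        }
      }
    }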
2024-12-11T20:51:53,296 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950313296"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:53,296 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950313296"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:53,298 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:51:53,298 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 543d1a3f52e9986ac920e4d97e5bcf0e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733950286438.543d1a3f52e9986ac920e4d97e5bcf0e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 334f3e62817613989e9752453a3409fa, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733950286438.334f3e62817613989e9752453a3409fa.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:51:53,298 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-11T20:51:53,298 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950313298"}]},"ts":"9223372036854775807"} 2024-12-11T20:51:53,299 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-11T20:51:53,300 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 47 msec 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, 
quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-11T20:51:53,304 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,304 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T20:51:53,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T20:51:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,319 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T20:51:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:53,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-11T20:51:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:53,362 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=815 (was 806) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:53130 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:33037 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-5756 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33037 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1318233647_1 at /127.0.0.1:37100 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:41690 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:39212 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher 
#12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1318233647_1 at /127.0.0.1:52410 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 128284) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1005 (was 1211), ProcessCount=17 (was 17), AvailableMemoryMB=561 (was 1066) 2024-12-11T20:51:53,362 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=815 is superior to 500 2024-12-11T20:51:53,385 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=815, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=1005, ProcessCount=17, AvailableMemoryMB=559 2024-12-11T20:51:53,385 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=815 is superior to 500 2024-12-11T20:51:53,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:51:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:51:53,388 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:51:53,388 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:53,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-11T20:51:53,389 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:51:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T20:51:53,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742209_1385 (size=407) 2024-12-11T20:51:53,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742209_1385 (size=407) 2024-12-11T20:51:53,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742209_1385 (size=407) 2024-12-11T20:51:53,439 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c5defaa8fa0dad7e821babe09e979667, NAME => 'testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:53,440 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3d939f248aeae78796be97c6162b82cf, NAME => 'testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:53,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742210_1386 (size=68) 2024-12-11T20:51:53,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742210_1386 (size=68) 2024-12-11T20:51:53,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742210_1386 (size=68) 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 3d939f248aeae78796be97c6162b82cf, disabling compactions & flushes 2024-12-11T20:51:53,485 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. after waiting 0 ms 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 
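
The create request logged above spells out the schema being asked for: one column family 'cf' with a single version, ROW bloom filter, 64 KB blocks, REGION_REPLICATION of 1, and a split at row key '1' (which is why two regions, '' to '1' and '1' to '', get initialized). A hedged sketch of how an equivalent table could be declared with the HBase 2.x client builders follows; the attribute values are read off the log entry, while the class name and connection handling are assumed for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");

    // Column family 'cf' mirroring the attributes printed in the create request.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(65536)
            .build())
        .build();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A single split key '1' produces the two regions that the
      // CreateTableProcedure writes out in CREATE_TABLE_WRITE_FS_LAYOUT.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```
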
2024-12-11T20:51:53,485 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,485 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3d939f248aeae78796be97c6162b82cf: Waiting for close lock at 1733950313485Disabling compacts and flushes for region at 1733950313485Disabling writes for close at 1733950313485Writing region close event to WAL at 1733950313485Closed at 1733950313485 2024-12-11T20:51:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T20:51:53,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742211_1387 (size=68) 2024-12-11T20:51:53,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742211_1387 (size=68) 2024-12-11T20:51:53,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742211_1387 (size=68) 2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing c5defaa8fa0dad7e821babe09e979667, disabling compactions & flushes 2024-12-11T20:51:53,502 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. after waiting 0 ms 2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:53,502 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
2024-12-11T20:51:53,502 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for c5defaa8fa0dad7e821babe09e979667: Waiting for close lock at 1733950313502Disabling compacts and flushes for region at 1733950313502Disabling writes for close at 1733950313502Writing region close event to WAL at 1733950313502Closed at 1733950313502 2024-12-11T20:51:53,503 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:51:53,503 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733950313503"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950313503"}]},"ts":"1733950313503"} 2024-12-11T20:51:53,503 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733950313503"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950313503"}]},"ts":"1733950313503"} 2024-12-11T20:51:53,505 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:51:53,506 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:51:53,506 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950313506"}]},"ts":"1733950313506"} 2024-12-11T20:51:53,507 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T20:51:53,508 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:51:53,509 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:51:53,509 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:51:53,509 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:51:53,509 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:51:53,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, ASSIGN}] 2024-12-11T20:51:53,511 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, ASSIGN 2024-12-11T20:51:53,511 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, ASSIGN 2024-12-11T20:51:53,511 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:51:53,512 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:51:53,662 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:51:53,662 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=3d939f248aeae78796be97c6162b82cf, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:51:53,662 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=c5defaa8fa0dad7e821babe09e979667, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:53,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, ASSIGN because future has completed 2024-12-11T20:51:53,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:51:53,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, ASSIGN because future has completed 2024-12-11T20:51:53,666 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T20:51:53,819 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,819 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 3d939f248aeae78796be97c6162b82cf, NAME => 'testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:51:53,819 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:53,819 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => c5defaa8fa0dad7e821babe09e979667, NAME => 'testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:51:53,819 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. service=AccessControlService 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
service=AccessControlService 2024-12-11T20:51:53,820 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:53,820 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,821 INFO [StoreOpener-3d939f248aeae78796be97c6162b82cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,822 INFO [StoreOpener-3d939f248aeae78796be97c6162b82cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3d939f248aeae78796be97c6162b82cf columnFamilyName cf 2024-12-11T20:51:53,822 DEBUG [StoreOpener-3d939f248aeae78796be97c6162b82cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:53,823 INFO [StoreOpener-c5defaa8fa0dad7e821babe09e979667-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,824 INFO [StoreOpener-3d939f248aeae78796be97c6162b82cf-1 {}] regionserver.HStore(327): Store=3d939f248aeae78796be97c6162b82cf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:53,824 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,825 INFO [StoreOpener-c5defaa8fa0dad7e821babe09e979667-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c5defaa8fa0dad7e821babe09e979667 columnFamilyName cf 2024-12-11T20:51:53,825 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,825 DEBUG [StoreOpener-c5defaa8fa0dad7e821babe09e979667-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:53,826 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,826 INFO [StoreOpener-c5defaa8fa0dad7e821babe09e979667-1 {}] regionserver.HStore(327): Store=c5defaa8fa0dad7e821babe09e979667/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:53,826 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 
3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,826 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,826 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,829 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,829 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,829 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,829 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,832 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,833 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,835 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:53,836 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:53,836 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 3d939f248aeae78796be97c6162b82cf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61209696, jitterRate=-0.08790445327758789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:53,836 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:53,837 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened c5defaa8fa0dad7e821babe09e979667; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65944499, jitterRate=-0.017350390553474426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:53,837 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:53,837 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 3d939f248aeae78796be97c6162b82cf: Running coprocessor pre-open hook at 1733950313820Writing region info on filesystem at 1733950313820Initializing all the Stores at 1733950313821 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950313821Cleaning up temporary data from old regions at 1733950313826 (+5 ms)Running coprocessor post-open hooks at 1733950313836 (+10 ms)Region opened successfully at 1733950313837 (+1 ms) 2024-12-11T20:51:53,837 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for c5defaa8fa0dad7e821babe09e979667: Running coprocessor pre-open hook at 1733950313820Writing region info on filesystem at 1733950313820Initializing all the Stores at 1733950313821 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950313821Cleaning up temporary data from old regions at 1733950313829 (+8 ms)Running coprocessor post-open hooks at 1733950313837 (+8 ms)Region opened successfully at 1733950313837 2024-12-11T20:51:53,838 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., pid=172, masterSystemTime=1733950313816 2024-12-11T20:51:53,838 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667., pid=173, masterSystemTime=1733950313817 2024-12-11T20:51:53,841 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:53,841 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
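
The AssignRegionHandler entries above report both new regions opened, one on each of the two region servers named in the assignment plan. If one wanted to confirm from the client side where the regions landed, the stock RegionLocator API is enough; this is an illustrative sketch only, under the assumption of a plain client connection, and is not taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class PrintRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // One HRegionLocation per region; the log shows one region hosted on
      // each of the two region servers that accepted the open requests.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.printf("%s -> %s%n",
            loc.getRegion().getRegionNameAsString(), loc.getServerName());
      }
    }
  }
}
```
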
2024-12-11T20:51:53,841 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=c5defaa8fa0dad7e821babe09e979667, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:53,844 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,844 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:53,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:53,845 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=3d939f248aeae78796be97c6162b82cf, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:51:53,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:51:53,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-11T20:51:53,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112 in 180 msec 2024-12-11T20:51:53,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, ASSIGN in 340 msec 2024-12-11T20:51:53,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-11T20:51:53,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820 in 185 msec 2024-12-11T20:51:53,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-11T20:51:53,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, ASSIGN in 342 msec 2024-12-11T20:51:53,858 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:51:53,858 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950313858"}]},"ts":"1733950313858"} 
2024-12-11T20:51:53,861 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T20:51:53,863 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:51:53,863 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T20:51:53,868 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T20:51:53,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:53,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:53,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:53,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:53,946 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:53,947 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 559 msec 2024-12-11T20:51:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T20:51:53,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T20:51:53,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T20:51:53,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T20:51:54,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T20:51:54,013 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T20:51:54,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-11T20:51:54,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:54,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-11T20:51:54,018 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:54,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-11T20:51:54,018 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:54,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T20:51:54,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950314021 (current time:1733950314021). 
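The MasterRpcServices entry above records an incoming snapshot request { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }. A hedged sketch of the client-side call that produces such a request, assuming the standard Admin snapshot API; the snapshot and table names are taken from the log, the rest is boilerplate.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // type=FLUSH has each region flushed as part of the snapshot; the master fills in
      // creation time, TTL and owner defaults, as the SnapshotDescriptionUtils lines show.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}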
2024-12-11T20:51:54,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:51:54,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T20:51:54,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:54,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9d9be5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:54,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:54,024 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:54,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:54,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:54,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ffa2317, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:54,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:54,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,025 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:54,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df36ba3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:54,027 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:54,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,028 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37538, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:54,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:54,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:54,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,029 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:51:54,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2870bd32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:54,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:54,030 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f47b7b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:54,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,032 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:54,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54751314, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:54,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:54,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37540, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:51:54,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:54,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,038 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:54,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:51:54,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:54,039 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T20:51:54,040 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:54,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:51:54,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T20:51:54,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T20:51:54,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T20:51:54,043 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:54,045 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:54,055 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:54,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742212_1388 (size=170) 2024-12-11T20:51:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742212_1388 (size=170) 2024-12-11T20:51:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742212_1388 (size=170) 2024-12-11T20:51:54,067 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-11T20:51:54,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf}] 2024-12-11T20:51:54,068 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,069 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T20:51:54,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for c5defaa8fa0dad7e821babe09e979667: 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:54,221 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:51:54,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-11T20:51:54,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 
2024-12-11T20:51:54,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 3d939f248aeae78796be97c6162b82cf: 2024-12-11T20:51:54,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T20:51:54,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:54,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:51:54,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742214_1390 (size=71) 2024-12-11T20:51:54,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742214_1390 (size=71) 2024-12-11T20:51:54,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742214_1390 (size=71) 2024-12-11T20:51:54,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 
2024-12-11T20:51:54,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-11T20:51:54,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-11T20:51:54,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,260 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf in 200 msec 2024-12-11T20:51:54,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742213_1389 (size=71) 2024-12-11T20:51:54,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742213_1389 (size=71) 2024-12-11T20:51:54,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742213_1389 (size=71) 2024-12-11T20:51:54,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
2024-12-11T20:51:54,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-11T20:51:54,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-11T20:51:54,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,276 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-11T20:51:54,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 in 210 msec 2024-12-11T20:51:54,290 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:54,292 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:54,297 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:54,297 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,297 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742215_1391 (size=552) 2024-12-11T20:51:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742215_1391 (size=552) 2024-12-11T20:51:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742215_1391 (size=552) 2024-12-11T20:51:54,325 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:54,330 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:54,331 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,339 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:54,339 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T20:51:54,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 299 msec 2024-12-11T20:51:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T20:51:54,362 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T20:51:54,366 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='09cb1aa130719cb5ba89a106cc047cba2', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:54,371 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1716494d0c09fba210b6b427f2bae704d', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,372 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='20276d7355002e847f025dea61f9c5ce5', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,372 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3d77bec43cc70a6f08a3b0aece36f0e67', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., 
hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,373 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4e63cace60b8344c692ec103479c67133', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,376 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='5f804736527164b86567522f1ba940094', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,377 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:54,377 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='6f6afb730e2492a439e6e6329b69bc8df', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,378 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='fc17e34814bd4719ca924929fbac178f', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,378 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='50c8ec3fd5d9c560caeddc91fdba44d9', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:51:54,379 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:54,380 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:54,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-11T20:51:54,393 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
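The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what a region server emits when a batch of mutations skips the write-ahead log. A minimal sketch of a client put that would take that path, assuming the standard client API; row, qualifier and value are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-placeholder"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // no WAL entry: faster writes, lost on a crash
      table.put(put);
    }
  }
}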
2024-12-11T20:51:54,393 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:54,395 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:54,403 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:54,412 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:54,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T20:51:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950314418 (current time:1733950314418). 2024-12-11T20:51:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:51:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T20:51:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3070def3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:54,420 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:54,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:54,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:54,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42177079, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-11T20:51:54,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:54,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:54,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,422 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:54,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718c00d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:54,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:54,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,427 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:54,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:51:54,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:54,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,429 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:54,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@363b1d57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:54,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:54,437 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:54,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:54,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:54,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ffe65f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:54,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:54,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,439 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:54,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76687731, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:54,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:54,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:54,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,443 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:54,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:54,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:54,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42782, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:54,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:51:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:54,449 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:54,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T20:51:54,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
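Once the second snapshot is stored as a SnapshotProcedure just below, the repeated "Checking to see if procedure is done pid=..." entries are the admin client polling the master until that procedure finishes. A hedged sketch of driving the same submit-then-poll cycle explicitly, assuming Admin.snapshotAsync and Admin.isSnapshotFinished; the sleep interval is arbitrary.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotSketch {
  public static void main(String[] args) throws Exception {
    SnapshotDescription snap = new SnapshotDescription(
        "snaptb0-testExportExpiredSnapshot",
        TableName.valueOf("testtb-testExportExpiredSnapshot"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(snap);                // returns once the procedure is submitted
      while (!admin.isSnapshotFinished(snap)) { // poll the master, like the checks in the log
        Thread.sleep(200);
      }
    }
  }
}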
2024-12-11T20:51:54,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T20:51:54,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T20:51:54,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T20:51:54,457 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:54,458 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:54,462 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742216_1392 (size=165) 2024-12-11T20:51:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742216_1392 (size=165) 2024-12-11T20:51:54,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742216_1392 (size=165) 2024-12-11T20:51:54,518 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:51:54,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf}] 2024-12-11T20:51:54,520 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,520 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,562 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T20:51:54,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-11T20:51:54,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-11T20:51:54,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:51:54,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:51:54,680 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing c5defaa8fa0dad7e821babe09e979667 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T20:51:54,681 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 3d939f248aeae78796be97c6162b82cf 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T20:51:54,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/.tmp/cf/9a426d81da74412ca488db067bb90e4c is 71, key is 1716819e31c46b58481ecc2f914a25c8/cf:q/1733950314378/Put/seqid=0 2024-12-11T20:51:54,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/.tmp/cf/a829cebce39e4d65b8e6c786e436521d is 71, key is 04c6a7c2f2f82cf3c5b505969e4755ef/cf:q/1733950314377/Put/seqid=0 2024-12-11T20:51:54,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742217_1393 (size=8326) 2024-12-11T20:51:54,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742217_1393 (size=8326) 2024-12-11T20:51:54,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742217_1393 (size=8326) 2024-12-11T20:51:54,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/.tmp/cf/9a426d81da74412ca488db067bb90e4c 2024-12-11T20:51:54,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742218_1394 (size=5288) 2024-12-11T20:51:54,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742218_1394 (size=5288) 2024-12-11T20:51:54,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742218_1394 (size=5288) 2024-12-11T20:51:54,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/.tmp/cf/9a426d81da74412ca488db067bb90e4c as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c 2024-12-11T20:51:54,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T20:51:54,779 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/.tmp/cf/a829cebce39e4d65b8e6c786e436521d 2024-12-11T20:51:54,783 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c, entries=47, sequenceid=6, filesize=8.1 K 2024-12-11T20:51:54,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/.tmp/cf/a829cebce39e4d65b8e6c786e436521d as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d 2024-12-11T20:51:54,784 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 3d939f248aeae78796be97c6162b82cf in 103ms, sequenceid=6, compaction requested=false 2024-12-11T20:51:54,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-11T20:51:54,785 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 3d939f248aeae78796be97c6162b82cf: 2024-12-11T20:51:54,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T20:51:54,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:54,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c] hfiles 2024-12-11T20:51:54,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d, entries=3, sequenceid=6, filesize=5.2 K 2024-12-11T20:51:54,790 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for c5defaa8fa0dad7e821babe09e979667 in 109ms, sequenceid=6, compaction requested=false 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for c5defaa8fa0dad7e821babe09e979667: 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d] hfiles 2024-12-11T20:51:54,790 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742219_1395 (size=110) 2024-12-11T20:51:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742219_1395 (size=110) 2024-12-11T20:51:54,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742219_1395 (size=110) 2024-12-11T20:51:54,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 
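[editor note] The entries above show each region of testtb-testExportExpiredSnapshot flushing its memstore to a new hfile (9a426d81... and a829cebc...) and then adding those files as references to the snapshot manifest. The same kind of flush can be requested explicitly from a client; a hedged sketch using the public Admin API, with only the table name taken from the log:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;

  // Hypothetical helper; assumes an open Connection as in the first sketch.
  static void flushTable(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      // Forces every region of the table to write its memstore out as an hfile,
      // which is what the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS step does per region.
      admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }

In the log this flush happens implicitly because the snapshot is of type FLUSH.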
2024-12-11T20:51:54,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-11T20:51:54,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-11T20:51:54,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:51:54,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3d939f248aeae78796be97c6162b82cf in 276 msec 2024-12-11T20:51:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742220_1396 (size=110) 2024-12-11T20:51:54,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742220_1396 (size=110) 2024-12-11T20:51:54,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742220_1396 (size=110) 2024-12-11T20:51:54,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
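[editor note] After the per-region SnapshotRegionProcedure children (pid=178 and pid=179) report success, the parent procedure consolidates the manifest and completes the snapshot (pid=177, below). From a client, completion can also be checked explicitly; a hedged sketch with the public Admin API, snapshot name from the log:

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.SnapshotDescription;

  // Hypothetical helper; assumes an open Admin as in the first sketch.
  static boolean snapshotDone(Admin admin) throws IOException {
    boolean done = admin.isSnapshotFinished(
        new SnapshotDescription("snaptb0-testExportExpiredSnapshot"));
    // Alternatively, list completed snapshots and look for the name.
    admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
    return done;
  }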
2024-12-11T20:51:54,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-11T20:51:54,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-11T20:51:54,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,798 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:51:54,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-11T20:51:54,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5defaa8fa0dad7e821babe09e979667 in 280 msec 2024-12-11T20:51:54,800 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:54,800 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:54,801 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:54,801 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,801 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742221_1397 (size=630) 2024-12-11T20:51:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742221_1397 (size=630) 2024-12-11T20:51:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742221_1397 (size=630) 2024-12-11T20:51:54,810 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:54,815 INFO 
[PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:54,815 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-11T20:51:54,817 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:54,817 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T20:51:54,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 365 msec 2024-12-11T20:51:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T20:51:55,083 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T20:51:55,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:51:55,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-11T20:51:55,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:51:55,092 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:55,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-11T20:51:55,093 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T20:51:55,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:51:55,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742222_1398 (size=400) 2024-12-11T20:51:55,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742222_1398 (size=400) 2024-12-11T20:51:55,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742222_1398 (size=400) 2024-12-11T20:51:55,114 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 08787acc4c8783f43f2b29bbed40a0de, NAME => 'testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:55,115 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 58264c5b234b4bd11beed47f41492f9c, NAME => 'testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:51:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742223_1399 (size=61) 2024-12-11T20:51:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742223_1399 (size=61) 2024-12-11T20:51:55,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742223_1399 (size=61) 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] 
regionserver.HRegion(1722): Closing 58264c5b234b4bd11beed47f41492f9c, disabling compactions & flushes 2024-12-11T20:51:55,149 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. after waiting 0 ms 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,149 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,149 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 58264c5b234b4bd11beed47f41492f9c: Waiting for close lock at 1733950315149Disabling compacts and flushes for region at 1733950315149Disabling writes for close at 1733950315149Writing region close event to WAL at 1733950315149Closed at 1733950315149 2024-12-11T20:51:55,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742224_1400 (size=61) 2024-12-11T20:51:55,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742224_1400 (size=61) 2024-12-11T20:51:55,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742224_1400 (size=61) 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 08787acc4c8783f43f2b29bbed40a0de, disabling compactions & flushes 2024-12-11T20:51:55,153 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. after waiting 0 ms 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 
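[editor note] The CreateTableProcedure above builds testExportExpiredSnapshot with a single column family 'cf' (VERSIONS => '1') and one split point, which is why two regions are initialized with key ranges ('', '1') and ('1', ''). A hedged sketch of the equivalent client call; descriptor values mirror the log, the rest is illustrative:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  // Hypothetical helper; assumes an open Admin as in the first sketch.
  static void createTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)          // VERSIONS => '1' in the log
            .build())
        .build();
    // One split key ('1') yields the two regions logged by RegionOpenAndInit.
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }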
2024-12-11T20:51:55,153 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,153 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 08787acc4c8783f43f2b29bbed40a0de: Waiting for close lock at 1733950315153Disabling compacts and flushes for region at 1733950315153Disabling writes for close at 1733950315153Writing region close event to WAL at 1733950315153Closed at 1733950315153 2024-12-11T20:51:55,154 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:51:55,154 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733950315154"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950315154"}]},"ts":"1733950315154"} 2024-12-11T20:51:55,154 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733950315154"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950315154"}]},"ts":"1733950315154"} 2024-12-11T20:51:55,156 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:51:55,157 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:51:55,157 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950315157"}]},"ts":"1733950315157"} 2024-12-11T20:51:55,159 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T20:51:55,159 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:51:55,160 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:51:55,160 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:51:55,160 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:51:55,160 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:51:55,160 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:51:55,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:51:55,161 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:51:55,161 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:51:55,161 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:51:55,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:51:55,161 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=08787acc4c8783f43f2b29bbed40a0de, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=58264c5b234b4bd11beed47f41492f9c, ASSIGN}] 2024-12-11T20:51:55,162 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=08787acc4c8783f43f2b29bbed40a0de, ASSIGN 2024-12-11T20:51:55,162 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=58264c5b234b4bd11beed47f41492f9c, ASSIGN 2024-12-11T20:51:55,163 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=58264c5b234b4bd11beed47f41492f9c, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:51:55,163 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=08787acc4c8783f43f2b29bbed40a0de, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:51:55,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T20:51:55,313 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:51:55,314 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=58264c5b234b4bd11beed47f41492f9c, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:55,314 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=08787acc4c8783f43f2b29bbed40a0de, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:55,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=58264c5b234b4bd11beed47f41492f9c, ASSIGN because future has completed 2024-12-11T20:51:55,316 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 58264c5b234b4bd11beed47f41492f9c, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:51:55,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=08787acc4c8783f43f2b29bbed40a0de, ASSIGN because future has completed 2024-12-11T20:51:55,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 08787acc4c8783f43f2b29bbed40a0de, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:51:55,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T20:51:55,471 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,471 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 58264c5b234b4bd11beed47f41492f9c, NAME => 'testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. service=AccessControlService 2024-12-11T20:51:55,472 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,472 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
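[editor note] Each region open below registers the AccessControlService coprocessor, which is what later reads and writes the per-table ACLs (jenkins: RWXCA). Loading that coprocessor is a configuration matter; a hedged sketch of the usual settings expressed programmatically, where the property names are standard HBase keys and real deployments would normally set them in hbase-site.xml:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  // Illustrative configuration only; the test cluster sets this up internally.
  Configuration conf = HBaseConfiguration.create();
  // Load the AccessController on master, region servers and every region,
  // so region opens log "System coprocessor ... AccessController loaded".
  conf.set("hbase.coprocessor.master.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.set("hbase.coprocessor.region.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.set("hbase.coprocessor.regionserver.classes",
      "org.apache.hadoop.hbase.security.access.AccessController");
  conf.setBoolean("hbase.security.authorization", true);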
2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 08787acc4c8783f43f2b29bbed40a0de, NAME => 'testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. service=AccessControlService 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:55,472 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,472 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,473 INFO [StoreOpener-58264c5b234b4bd11beed47f41492f9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,473 INFO [StoreOpener-08787acc4c8783f43f2b29bbed40a0de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,475 INFO [StoreOpener-08787acc4c8783f43f2b29bbed40a0de-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 08787acc4c8783f43f2b29bbed40a0de columnFamilyName cf 2024-12-11T20:51:55,475 INFO [StoreOpener-58264c5b234b4bd11beed47f41492f9c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58264c5b234b4bd11beed47f41492f9c columnFamilyName cf 2024-12-11T20:51:55,475 DEBUG [StoreOpener-08787acc4c8783f43f2b29bbed40a0de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:55,475 DEBUG [StoreOpener-58264c5b234b4bd11beed47f41492f9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:51:55,475 INFO [StoreOpener-08787acc4c8783f43f2b29bbed40a0de-1 {}] regionserver.HStore(327): Store=08787acc4c8783f43f2b29bbed40a0de/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:55,475 INFO [StoreOpener-58264c5b234b4bd11beed47f41492f9c-1 {}] regionserver.HStore(327): Store=58264c5b234b4bd11beed47f41492f9c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:51:55,475 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,475 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,476 DEBUG 
[RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,476 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,477 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,478 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,479 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:55,480 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:51:55,480 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 08787acc4c8783f43f2b29bbed40a0de; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66471395, jitterRate=-0.009499028325080872}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:55,480 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:55,480 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 58264c5b234b4bd11beed47f41492f9c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74551577, jitterRate=0.11090506613254547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:51:55,480 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:55,480 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 08787acc4c8783f43f2b29bbed40a0de: Running coprocessor pre-open hook at 1733950315472Writing region info on filesystem at 1733950315472Initializing all the Stores at 1733950315473 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950315473Cleaning up temporary data from old regions at 1733950315476 (+3 ms)Running coprocessor post-open hooks at 1733950315480 (+4 ms)Region opened successfully at 1733950315480 2024-12-11T20:51:55,480 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 58264c5b234b4bd11beed47f41492f9c: Running coprocessor pre-open hook at 1733950315472Writing region info on filesystem at 1733950315472Initializing all the Stores at 1733950315473 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950315473Cleaning up temporary data from old regions at 1733950315476 (+3 ms)Running coprocessor post-open hooks at 1733950315480 (+4 ms)Region opened successfully at 1733950315480 2024-12-11T20:51:55,481 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., pid=183, masterSystemTime=1733950315468 2024-12-11T20:51:55,481 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de., pid=184, masterSystemTime=1733950315469 2024-12-11T20:51:55,483 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:55,483 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 
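[editor note] With both regions of testExportExpiredSnapshot open, the test goes on to load rows into them; the per-row location lookups near the end of this excerpt are the client resolving which region hosts each key before writing. A hedged sketch of such a write, where the row key, qualifier and value are made up for illustration and only the table and family names come from the log:

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  // Hypothetical helper; assumes an open Connection as in the first sketch.
  static void writeRow(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));   // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
    }
  }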
2024-12-11T20:51:55,484 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=58264c5b234b4bd11beed47f41492f9c, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:51:55,485 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,485 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,485 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=08787acc4c8783f43f2b29bbed40a0de, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:51:55,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 58264c5b234b4bd11beed47f41492f9c, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:51:55,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 08787acc4c8783f43f2b29bbed40a0de, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:51:55,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-11T20:51:55,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 58264c5b234b4bd11beed47f41492f9c, server=5be53b084ac7,39865,1733949995112 in 171 msec 2024-12-11T20:51:55,492 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=58264c5b234b4bd11beed47f41492f9c, ASSIGN in 329 msec 2024-12-11T20:51:55,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-11T20:51:55,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 08787acc4c8783f43f2b29bbed40a0de, server=5be53b084ac7,40781,1733949995244 in 172 msec 2024-12-11T20:51:55,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-11T20:51:55,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=08787acc4c8783f43f2b29bbed40a0de, ASSIGN in 331 msec 2024-12-11T20:51:55,494 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:51:55,495 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950315494"}]},"ts":"1733950315494"} 2024-12-11T20:51:55,496 INFO [PEWorker-2 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T20:51:55,497 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:51:55,497 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T20:51:55,501 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T20:51:55,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:55,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:55,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:55,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:51:55,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 464 msec 2024-12-11T20:51:55,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000002/launch_container.sh] 2024-12-11T20:51:55,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000002/container_tokens] 2024-12-11T20:51:55,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000002/sysfs] 2024-12-11T20:51:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T20:51:55,723 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T20:51:55,723 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-11T20:51:55,723 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:55,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-11T20:51:55,727 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:55,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 
2024-12-11T20:51:55,728 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:55,735 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='0b466999f9ea845fffc18b92b2d4821e2', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:51:55,745 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1e26c1a82119b873ffdbdd99e9f554582', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,746 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='22aa3992762a3a7bcf8ca2d99d919efb0', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,747 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='34011cde0c11bb79b960e1865e291a5b8', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,747 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='4787bb53471a5fc57598b7a37b080c4c1', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,748 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='576257f6339f82bf066d948769bafc138', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,749 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='69faf646f67e1d9ecb24e3a4952e7ac67', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,750 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='d562045b0bbb02886b95da71917539b4', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,750 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='2e0c9ffd43380be1919546604f987ed1', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 
{}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:55,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:51:55,759 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:55,763 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-11T20:51:55,764 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:55,764 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:51:55,766 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:55,775 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T20:51:55,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T20:51:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T20:51:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:51:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e06c31c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:55,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:55,785 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:55,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:55,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 
2024-12-11T20:51:55,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cb0b2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:55,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:55,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,787 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59016, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:55,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bca8476, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:55,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:55,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:55,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37558, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:55,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:51:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,791 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2001c7ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:51:55,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:51:55,793 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66dcf3e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:51:55,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,795 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:51:55,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74d79fea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:51:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:51:55,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:51:55,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:55,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:55,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:51:55,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:51:55,804 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:51:55,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:51:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:51:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:51:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T20:51:55,807 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:51:55,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T20:51:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T20:51:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T20:51:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T20:51:55,817 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:51:55,819 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:51:55,823 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:51:55,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742225_1401 (size=152) 2024-12-11T20:51:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742225_1401 (size=152) 2024-12-11T20:51:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742225_1401 (size=152) 2024-12-11T20:51:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T20:51:56,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T20:51:56,246 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:51:56,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 08787acc4c8783f43f2b29bbed40a0de}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 58264c5b234b4bd11beed47f41492f9c}] 2024-12-11T20:51:56,248 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:56,248 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:56,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-11T20:51:56,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:56,400 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 58264c5b234b4bd11beed47f41492f9c 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T20:51:56,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-11T20:51:56,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:51:56,401 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 08787acc4c8783f43f2b29bbed40a0de 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T20:51:56,416 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/.tmp/cf/be7a32487f834f1aa043b3d5803803c8 is 71, key is 00c8d1d5d8e936afee7b440fb37024d2/cf:q/1733950315756/Put/seqid=0 2024-12-11T20:51:56,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/.tmp/cf/0ca637e61ac74bb28294f8b555e96ad1 is 71, key is 1062e2f7a469092fac5134b1077f9fb0/cf:q/1733950315758/Put/seqid=0 2024-12-11T20:51:56,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742226_1402 (size=5422) 2024-12-11T20:51:56,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742226_1402 (size=5422) 2024-12-11T20:51:56,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742226_1402 (size=5422) 2024-12-11T20:51:56,429 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/.tmp/cf/be7a32487f834f1aa043b3d5803803c8 2024-12-11T20:51:56,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742227_1403 (size=8188) 2024-12-11T20:51:56,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742227_1403 (size=8188) 2024-12-11T20:51:56,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742227_1403 (size=8188) 2024-12-11T20:51:56,431 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/.tmp/cf/0ca637e61ac74bb28294f8b555e96ad1 2024-12-11T20:51:56,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/.tmp/cf/be7a32487f834f1aa043b3d5803803c8 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/cf/be7a32487f834f1aa043b3d5803803c8 2024-12-11T20:51:56,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/.tmp/cf/0ca637e61ac74bb28294f8b555e96ad1 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/cf/0ca637e61ac74bb28294f8b555e96ad1 2024-12-11T20:51:56,439 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/cf/be7a32487f834f1aa043b3d5803803c8, entries=5, sequenceid=5, filesize=5.3 K 2024-12-11T20:51:56,440 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 08787acc4c8783f43f2b29bbed40a0de in 39ms, sequenceid=5, compaction requested=false 2024-12-11T20:51:56,440 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-11T20:51:56,440 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/cf/0ca637e61ac74bb28294f8b555e96ad1, entries=45, sequenceid=5, filesize=8.0 K 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 08787acc4c8783f43f2b29bbed40a0de: 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. for snapshot-testExportExpiredSnapshot completed. 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,441 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 58264c5b234b4bd11beed47f41492f9c in 41ms, sequenceid=5, compaction requested=false 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 58264c5b234b4bd11beed47f41492f9c: 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. for snapshot-testExportExpiredSnapshot completed. 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/cf/be7a32487f834f1aa043b3d5803803c8] hfiles 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/cf/be7a32487f834f1aa043b3d5803803c8 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/cf/0ca637e61ac74bb28294f8b555e96ad1] hfiles 2024-12-11T20:51:56,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/cf/0ca637e61ac74bb28294f8b555e96ad1 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T20:51:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742229_1405 (size=103) 2024-12-11T20:51:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742229_1405 (size=103) 2024-12-11T20:51:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742228_1404 (size=103) 2024-12-11T20:51:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742228_1404 (size=103) 2024-12-11T20:51:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742228_1404 (size=103) 2024-12-11T20:51:56,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:51:56,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-11T20:51:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742229_1405 (size=103) 2024-12-11T20:51:56,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 
2024-12-11T20:51:56,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-11T20:51:56,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-11T20:51:56,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:56,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-11T20:51:56,448 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:51:56,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:56,448 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:51:56,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 08787acc4c8783f43f2b29bbed40a0de in 203 msec 2024-12-11T20:51:56,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-12-11T20:51:56,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 58264c5b234b4bd11beed47f41492f9c in 203 msec 2024-12-11T20:51:56,450 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:51:56,451 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:51:56,452 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:51:56,452 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,452 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added 
to blk_1073742230_1406 (size=609) 2024-12-11T20:51:56,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742230_1406 (size=609) 2024-12-11T20:51:56,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742230_1406 (size=609) 2024-12-11T20:51:56,462 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:51:56,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:51:56,467 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-11T20:51:56,469 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:51:56,469 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T20:51:56,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 662 msec 2024-12-11T20:51:56,944 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0007_000001 (auth:SIMPLE) from 127.0.0.1:37060 2024-12-11T20:51:56,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T20:51:56,953 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T20:51:56,959 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000001/launch_container.sh] 2024-12-11T20:51:56,959 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000001/container_tokens] 2024-12-11T20:51:56,959 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0007/container_1733950008312_0007_01_000001/sysfs] 2024-12-11T20:51:58,276 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:52:01,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:52:03,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T20:52:03,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T20:52:06,962 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950326962 2024-12-11T20:52:06,963 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950326962, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950326962, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:06,990 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:06,990 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950326962, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950326962/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T20:52:06,992 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:52:06,993 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T20:52:06,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-11T20:52:06,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:06,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T20:52:07,000 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950327000"}]},"ts":"1733950327000"} 2024-12-11T20:52:07,002 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-11T20:52:07,002 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-11T20:52:07,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-11T20:52:07,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, UNASSIGN}] 2024-12-11T20:52:07,006 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, UNASSIGN 2024-12-11T20:52:07,006 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, UNASSIGN 2024-12-11T20:52:07,007 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=c5defaa8fa0dad7e821babe09e979667, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:07,007 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=3d939f248aeae78796be97c6162b82cf, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:52:07,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, UNASSIGN because future has completed 2024-12-11T20:52:07,012 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:52:07,012 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:52:07,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, UNASSIGN because future has completed 2024-12-11T20:52:07,015 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:52:07,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:52:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T20:52:07,164 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:52:07,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:52:07,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing c5defaa8fa0dad7e821babe09e979667, disabling compactions & flushes 2024-12-11T20:52:07,164 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:52:07,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 2024-12-11T20:52:07,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. after waiting 0 ms 2024-12-11T20:52:07,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
2024-12-11T20:52:07,171 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:52:07,171 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:52:07,171 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 3d939f248aeae78796be97c6162b82cf, disabling compactions & flushes 2024-12-11T20:52:07,171 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:52:07,171 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:52:07,171 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. after waiting 0 ms 2024-12-11T20:52:07,171 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:52:07,183 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:52:07,184 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:52:07,184 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667. 
2024-12-11T20:52:07,184 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for c5defaa8fa0dad7e821babe09e979667: Waiting for close lock at 1733950327164Running coprocessor pre-close hooks at 1733950327164Disabling compacts and flushes for region at 1733950327164Disabling writes for close at 1733950327164Writing region close event to WAL at 1733950327165 (+1 ms)Running coprocessor post-close hooks at 1733950327183 (+18 ms)Closed at 1733950327184 (+1 ms) 2024-12-11T20:52:07,186 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:52:07,187 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=c5defaa8fa0dad7e821babe09e979667, regionState=CLOSED 2024-12-11T20:52:07,191 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:52:07,192 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:52:07,192 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf. 2024-12-11T20:52:07,192 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 3d939f248aeae78796be97c6162b82cf: Waiting for close lock at 1733950327171Running coprocessor pre-close hooks at 1733950327171Disabling compacts and flushes for region at 1733950327171Disabling writes for close at 1733950327171Writing region close event to WAL at 1733950327184 (+13 ms)Running coprocessor post-close hooks at 1733950327192 (+8 ms)Closed at 1733950327192 2024-12-11T20:52:07,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:52:07,195 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 3d939f248aeae78796be97c6162b82cf 2024-12-11T20:52:07,205 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=3d939f248aeae78796be97c6162b82cf, regionState=CLOSED 2024-12-11T20:52:07,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-11T20:52:07,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:52:07,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; 
CloseRegionProcedure c5defaa8fa0dad7e821babe09e979667, server=5be53b084ac7,39865,1733949995112 in 183 msec 2024-12-11T20:52:07,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5defaa8fa0dad7e821babe09e979667, UNASSIGN in 201 msec 2024-12-11T20:52:07,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-11T20:52:07,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 3d939f248aeae78796be97c6162b82cf, server=5be53b084ac7,40355,1733949994820 in 193 msec 2024-12-11T20:52:07,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-12-11T20:52:07,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3d939f248aeae78796be97c6162b82cf, UNASSIGN in 205 msec 2024-12-11T20:52:07,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-11T20:52:07,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 210 msec 2024-12-11T20:52:07,219 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950327219"}]},"ts":"1733950327219"} 2024-12-11T20:52:07,221 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-11T20:52:07,221 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-11T20:52:07,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 227 msec 2024-12-11T20:52:07,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T20:52:07,313 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T20:52:07,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,316 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,317 DEBUG 
[PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,319 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,321 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf 2024-12-11T20:52:07,321 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:52:07,327 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/recovered.edits] 2024-12-11T20:52:07,327 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/recovered.edits] 2024-12-11T20:52:07,535 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/cf/9a426d81da74412ca488db067bb90e4c 2024-12-11T20:52:07,535 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/cf/a829cebce39e4d65b8e6c786e436521d 2024-12-11T20:52:07,539 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf/recovered.edits/9.seqid 2024-12-11T20:52:07,539 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667/recovered.edits/9.seqid 2024-12-11T20:52:07,539 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/3d939f248aeae78796be97c6162b82cf 2024-12-11T20:52:07,539 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportExpiredSnapshot/c5defaa8fa0dad7e821babe09e979667 2024-12-11T20:52:07,541 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-11T20:52:07,544 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,547 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-11T20:52:07,550 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-11T20:52:07,552 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,552 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-11T20:52:07,553 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950327552"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:07,553 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950327552"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:07,555 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:52:07,555 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c5defaa8fa0dad7e821babe09e979667, NAME => 'testtb-testExportExpiredSnapshot,,1733950313386.c5defaa8fa0dad7e821babe09e979667.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3d939f248aeae78796be97c6162b82cf, NAME => 'testtb-testExportExpiredSnapshot,1,1733950313386.3d939f248aeae78796be97c6162b82cf.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:52:07,555 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
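By this point the DeleteTableProcedure (pid=194) has archived both region directories -- HFileArchiver moves the store files and recovered.edits from data/default/<table>/<region> to the mirrored path under archive/ -- and is removing the region rows, the table state, and the descriptor from hbase:meta. A hedged sketch of the client calls behind this cleanup follows, reusing the table and admin handles from the first sketch; the snapshot name mirrors one of the deletes logged shortly after this point, and the disabled-state check is an assumption.

if (!admin.isTableDisabled(table)) {
  admin.disableTable(table);   // a table must be disabled before it can be deleted
}
admin.deleteTable(table);      // DeleteTableProcedure: archive region dirs, clean hbase:meta, drop descriptor
// The MasterRpcServices(838) delete requests a little further on correspond to per-snapshot cleanup such as:
admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");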
2024-12-11T20:52:07,555 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950327555"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:07,557 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-11T20:52:07,558 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 244 msec 2024-12-11T20:52:07,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T20:52:07,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T20:52:07,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T20:52:07,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,692 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-11T20:52:07,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:07,694 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-11T20:52:07,694 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T20:52:07,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:07,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:07,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:07,700 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T20:52:07,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-11T20:52:07,703 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: 
DISABLED 2024-12-11T20:52:07,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-11T20:52:07,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T20:52:07,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-11T20:52:07,731 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=804 (was 815), OpenFileDescriptor=779 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=930 (was 1005), ProcessCount=14 (was 17), AvailableMemoryMB=2038 (was 559) - AvailableMemoryMB LEAK? - 2024-12-11T20:52:07,732 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-11T20:52:07,750 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=804, OpenFileDescriptor=779, MaxFileDescriptor=1048576, SystemLoadAverage=930, ProcessCount=14, AvailableMemoryMB=2033 2024-12-11T20:52:07,750 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-11T20:52:07,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:52:07,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:07,753 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:52:07,753 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:07,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-11T20:52:07,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T20:52:07,754 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:52:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is 
added to blk_1073742231_1407 (size=412) 2024-12-11T20:52:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742231_1407 (size=412) 2024-12-11T20:52:07,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742231_1407 (size=412) 2024-12-11T20:52:07,763 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a36e6ed8b0335a0457615a1b86f94ca1, NAME => 'testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:07,763 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2cd958c9ffcd19341df8ec89b3533695, NAME => 'testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742233_1409 (size=73) 2024-12-11T20:52:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742233_1409 (size=73) 2024-12-11T20:52:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742232_1408 (size=73) 2024-12-11T20:52:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742233_1409 (size=73) 2024-12-11T20:52:07,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742232_1408 (size=73) 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing a36e6ed8b0335a0457615a1b86f94ca1, 
disabling compactions & flushes 2024-12-11T20:52:07,779 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. after waiting 0 ms 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:07,779 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:07,779 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for a36e6ed8b0335a0457615a1b86f94ca1: Waiting for close lock at 1733950327779Disabling compacts and flushes for region at 1733950327779Disabling writes for close at 1733950327779Writing region close event to WAL at 1733950327779Closed at 1733950327779 2024-12-11T20:52:07,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742232_1408 (size=73) 2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 2cd958c9ffcd19341df8ec89b3533695, disabling compactions & flushes 2024-12-11T20:52:07,780 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. after waiting 0 ms 2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:07,780 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 
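The HMaster$4(2454) entry above logs the full descriptor of the new table: a single 'cf' family with VERSIONS => '1' and BLOOMFILTER => 'ROW', after which the RegionOpenAndInit pool initializes two regions split at key '1'. A rough client-side equivalent of that create request is sketched below, again reusing the Admin handle from the first sketch (additional imports: TableDescriptor, TableDescriptorBuilder and ColumnFamilyDescriptorBuilder from org.apache.hadoop.hbase.client, BloomType from org.apache.hadoop.hbase.regionserver, Bytes from org.apache.hadoop.hbase.util). Only the table name, family name, version count, bloom filter type, and split key are taken from the log; the remaining builder defaults are assumed.

TableName newTable = TableName.valueOf("testtb-testEmptyExportFileSystemState");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(newTable)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)                     // VERSIONS => '1' in the logged descriptor
        .setBloomFilterType(BloomType.ROW)     // BLOOMFILTER => 'ROW'
        .build())
    .build();
// One split key "1" yields the two regions ('' -> '1' and '1' -> '') created above.
admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });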
2024-12-11T20:52:07,780 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2cd958c9ffcd19341df8ec89b3533695: Waiting for close lock at 1733950327780Disabling compacts and flushes for region at 1733950327780Disabling writes for close at 1733950327780Writing region close event to WAL at 1733950327780Closed at 1733950327780 2024-12-11T20:52:07,781 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:52:07,781 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733950327781"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950327781"}]},"ts":"1733950327781"} 2024-12-11T20:52:07,781 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733950327781"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950327781"}]},"ts":"1733950327781"} 2024-12-11T20:52:07,783 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:52:07,784 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:52:07,784 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950327784"}]},"ts":"1733950327784"} 2024-12-11T20:52:07,788 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T20:52:07,788 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:52:07,789 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:52:07,789 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:52:07,789 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:52:07,789 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:52:07,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, ASSIGN}] 2024-12-11T20:52:07,791 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, ASSIGN 2024-12-11T20:52:07,791 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, ASSIGN 2024-12-11T20:52:07,791 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, ASSIGN; state=OFFLINE, location=5be53b084ac7,40355,1733949994820; forceNewPlan=false, retain=false 2024-12-11T20:52:07,791 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:52:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T20:52:07,942 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
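Here the balancer (BalancerClusterState/BaseLoadBalancer) picks target servers for the two new regions, after which the per-region TransitRegionStateProcedures move to REGION_STATE_TRANSITION_OPEN and spawn OpenRegionProcedures on 5be53b084ac7,40355 and 5be53b084ac7,39865. A hedged sketch for inspecting the resulting placement from the client side, using the same Connection as above (imports: RegionLocator from org.apache.hadoop.hbase.client, HRegionLocation from org.apache.hadoop.hbase), would be:

try (RegionLocator locator =
         conn.getRegionLocator(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
  for (HRegionLocation loc : locator.getAllRegionLocations()) {
    // e.g. 2cd958c9ffcd19341df8ec89b3533695 -> 5be53b084ac7,39865,1733949995112
    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
  }
}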
2024-12-11T20:52:07,942 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=a36e6ed8b0335a0457615a1b86f94ca1, regionState=OPENING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:52:07,942 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=2cd958c9ffcd19341df8ec89b3533695, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:07,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, ASSIGN because future has completed 2024-12-11T20:52:07,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:52:07,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, ASSIGN because future has completed 2024-12-11T20:52:07,949 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:52:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T20:52:08,102 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:08,103 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => a36e6ed8b0335a0457615a1b86f94ca1, NAME => 'testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:52:08,103 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. service=AccessControlService 2024-12-11T20:52:08,103 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:52:08,103 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,103 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:08,104 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,104 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,104 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:08,104 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 2cd958c9ffcd19341df8ec89b3533695, NAME => 'testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:52:08,105 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. service=AccessControlService 2024-12-11T20:52:08,105 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:52:08,105 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,105 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:08,105 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,105 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,106 INFO [StoreOpener-a36e6ed8b0335a0457615a1b86f94ca1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,106 INFO [StoreOpener-2cd958c9ffcd19341df8ec89b3533695-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,107 INFO [StoreOpener-a36e6ed8b0335a0457615a1b86f94ca1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a36e6ed8b0335a0457615a1b86f94ca1 columnFamilyName cf 2024-12-11T20:52:08,108 DEBUG [StoreOpener-a36e6ed8b0335a0457615a1b86f94ca1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:08,109 INFO [StoreOpener-a36e6ed8b0335a0457615a1b86f94ca1-1 {}] regionserver.HStore(327): Store=a36e6ed8b0335a0457615a1b86f94ca1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:52:08,109 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,110 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,112 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,112 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,112 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,115 INFO [StoreOpener-2cd958c9ffcd19341df8ec89b3533695-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cd958c9ffcd19341df8ec89b3533695 columnFamilyName cf 2024-12-11T20:52:08,115 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,115 DEBUG [StoreOpener-2cd958c9ffcd19341df8ec89b3533695-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:08,117 INFO [StoreOpener-2cd958c9ffcd19341df8ec89b3533695-1 {}] regionserver.HStore(327): Store=2cd958c9ffcd19341df8ec89b3533695/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:52:08,118 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,118 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:52:08,118 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,118 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened a36e6ed8b0335a0457615a1b86f94ca1; next 
sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74513482, jitterRate=0.11033740639686584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:52:08,118 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,118 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,119 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,119 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,119 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for a36e6ed8b0335a0457615a1b86f94ca1: Running coprocessor pre-open hook at 1733950328104Writing region info on filesystem at 1733950328104Initializing all the Stores at 1733950328105 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950328106 (+1 ms)Cleaning up temporary data from old regions at 1733950328112 (+6 ms)Running coprocessor post-open hooks at 1733950328118 (+6 ms)Region opened successfully at 1733950328119 (+1 ms) 2024-12-11T20:52:08,120 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,122 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1., pid=198, masterSystemTime=1733950328100 2024-12-11T20:52:08,123 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:08,123 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 
2024-12-11T20:52:08,124 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=a36e6ed8b0335a0457615a1b86f94ca1, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:52:08,126 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:52:08,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:52:08,127 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 2cd958c9ffcd19341df8ec89b3533695; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61444827, jitterRate=-0.0844007283449173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:52:08,127 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,127 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 2cd958c9ffcd19341df8ec89b3533695: Running coprocessor pre-open hook at 1733950328105Writing region info on filesystem at 1733950328105Initializing all the Stores at 1733950328106 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950328106Cleaning up temporary data from old regions at 1733950328119 (+13 ms)Running coprocessor post-open hooks at 1733950328127 (+8 ms)Region opened successfully at 1733950328127 2024-12-11T20:52:08,127 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695., pid=199, masterSystemTime=1733950328101 2024-12-11T20:52:08,129 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:08,129 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 
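The recurring MasterRpcServices(1377) "Checking to see if procedure is done pid=195" entries are the client polling its create-table procedure; once both OpenRegionProcedures report OPEN, that future completes. An equivalent explicit availability check, as a rough sketch assuming the same Admin handle, would be:

while (!admin.isTableAvailable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
  Thread.sleep(100);   // poll until both regions are reported OPEN in hbase:meta
}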
2024-12-11T20:52:08,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-11T20:52:08,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820 in 183 msec 2024-12-11T20:52:08,130 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=2cd958c9ffcd19341df8ec89b3533695, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:08,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, ASSIGN in 339 msec 2024-12-11T20:52:08,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:52:08,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-11T20:52:08,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112 in 186 msec 2024-12-11T20:52:08,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-11T20:52:08,140 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, ASSIGN in 347 msec 2024-12-11T20:52:08,140 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:52:08,140 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950328140"}]},"ts":"1733950328140"} 2024-12-11T20:52:08,143 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T20:52:08,144 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:52:08,145 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-11T20:52:08,147 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:52:08,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
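The "Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA" entry above is the create-table procedure persisting the table owner's full rights into the hbase:acl table; the ZooKeeper notifications on /hbase/acl that follow fan the change out to every region server's permission cache. In the test this owner grant is written automatically, but issued explicitly through the public API an equivalent grant would look roughly like this (a sketch; connection handling is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
        public static void main(String[] args) throws Throwable {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                // Grant the "jenkins" user RWXCA (read, write, exec, create, admin) on the whole table,
                // mirroring the acl row written by the create-table procedure in the log above.
                AccessControlClient.grant(conn,
                    TableName.valueOf("testtb-testEmptyExportFileSystemState"),
                    "jenkins",
                    null,   // column family: null = the whole table
                    null,   // qualifier: null = the whole family
                    Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                    Permission.Action.CREATE, Permission.Action.ADMIN);
            }
        }
    }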
2024-12-11T20:52:08,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:08,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:08,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:08,203 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 450 msec 2024-12-11T20:52:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T20:52:08,383 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T20:52:08,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-11T20:52:08,384 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:08,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-11T20:52:08,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-11T20:52:08,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:08,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-11T20:52:08,392 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:52:08,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:52:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950328395 (current time:1733950328395). 
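The snapshot request logged just above ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is what a client-side Admin.snapshot() call turns into on the master. A minimal sketch of issuing the same request follows; the connection handling is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // A FLUSH-type snapshot: each online region flushes its memstore (a no-op here,
                // since nothing has been written yet) and then records its store files in the manifest.
                admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
                    TableName.valueOf("testtb-testEmptyExportFileSystemState"),
                    SnapshotType.FLUSH);
            }
        }
    }

The SnapshotProcedure states that follow in the log (SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION) are the master-side execution of this single blocking call.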
2024-12-11T20:52:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:52:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-11T20:52:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:52:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e8e973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:08,397 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:08,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:08,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:08,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33456070, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:08,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:08,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,399 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:08,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76def971, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:08,401 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:08,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:08,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:08,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:52:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,403 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:52:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5be9dd66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:08,405 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:08,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:08,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:08,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ef5b318, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:08,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:08,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,407 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:08,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57177ede, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:08,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:08,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:08,410 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:52:08,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:08,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:08,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35726, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:08,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:52:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:08,417 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:52:08,417 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:52:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:52:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:52:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-11T20:52:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T20:52:08,423 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:52:08,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:52:08,429 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:52:08,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742234_1410 (size=185) 2024-12-11T20:52:08,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742234_1410 (size=185) 2024-12-11T20:52:08,443 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:52:08,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1}] 2024-12-11T20:52:08,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742234_1410 (size=185) 2024-12-11T20:52:08,444 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,444 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T20:52:08,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-11T20:52:08,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-11T20:52:08,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:08,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:08,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for a36e6ed8b0335a0457615a1b86f94ca1: 2024-12-11T20:52:08,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 2cd958c9ffcd19341df8ec89b3533695: 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:08,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:52:08,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742235_1411 (size=76) 2024-12-11T20:52:08,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742235_1411 (size=76) 2024-12-11T20:52:08,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742235_1411 (size=76) 2024-12-11T20:52:08,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742236_1412 (size=76) 2024-12-11T20:52:08,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742236_1412 (size=76) 2024-12-11T20:52:08,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742236_1412 (size=76) 2024-12-11T20:52:08,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:08,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-11T20:52:08,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 
2024-12-11T20:52:08,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-11T20:52:08,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-11T20:52:08,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-11T20:52:08,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,639 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:08,639 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:08,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695 in 197 msec 2024-12-11T20:52:08,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-12-11T20:52:08,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 in 197 msec 2024-12-11T20:52:08,652 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:52:08,654 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:52:08,654 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:52:08,655 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:08,656 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:08,714 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742237_1413 (size=567) 2024-12-11T20:52:08,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742237_1413 (size=567) 2024-12-11T20:52:08,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742237_1413 (size=567) 2024-12-11T20:52:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T20:52:09,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T20:52:09,125 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:52:09,143 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:52:09,144 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,146 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:52:09,146 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-11T20:52:09,155 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 728 msec 2024-12-11T20:52:09,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T20:52:09,553 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T20:52:09,558 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0ecc973919b7b1c57f0c337e9f0ffd4be', locateType=CURRENT is 
[region=testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:09,559 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='189c68928376d43e2e05d16964894214b', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:52:09,560 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='254229b8b179ceffb98afda40c0580aa7', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:52:09,561 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3de6e0bccbe90bafcb88e7ad1665b48ac', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:52:09,562 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='40de224256db4d5a14830608ab6abcf2b', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1., hostname=5be53b084ac7,40355,1733949994820, seqNum=2] 2024-12-11T20:52:09,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:52:09,567 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40355 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:52:09,569 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:52:09,573 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-11T20:52:09,573 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 
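The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above come from the test loading rows with write-ahead logging turned off. Per mutation, that corresponds to setting Durability.SKIP_WAL on each Put; a hedged sketch is below. The row key is one of the keys looked up above and the value is a placeholder; the family 'cf' and qualifier 'q' match the cells shown in the later flush lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowsWithoutWal {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(
                     TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
                Put put = new Put(Bytes.toBytes("0ecc973919b7b1c57f0c337e9f0ffd4be")) // example row key
                    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                // Skip the WAL entirely; the region server then logs the "Data may be lost" warning seen above.
                put.setDurability(Durability.SKIP_WAL);
                table.put(put);
            }
        }
    }

The second snapshot requested next (snaptb0-testEmptyExportFileSystemState) uses the same Admin.snapshot() call shown earlier, but this time the FLUSH step actually writes the memstore contents out as HFiles.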
2024-12-11T20:52:09,573 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:09,575 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:52:09,585 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:52:09,591 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T20:52:09,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:52:09,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950329595 (current time:1733950329595). 2024-12-11T20:52:09,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:52:09,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-11T20:52:09,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:52:09,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f67987f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:09,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:09,597 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:09,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:09,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:09,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16660ddd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:09,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:09,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,600 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:09,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bee7ca5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:09,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:09,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:09,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:09,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:52:09,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:09,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,611 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:52:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@446f2c4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:09,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:09,612 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:09,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:09,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:09,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c685b4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:09,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:09,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,614 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:09,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c5589d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:09,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:09,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:09,618 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:09,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:09,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:09,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:09,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 
2024-12-11T20:52:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:09,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T20:52:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:52:09,627 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:52:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T20:52:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-11T20:52:09,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T20:52:09,632 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:52:09,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:52:09,635 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:52:09,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742238_1414 (size=180) 2024-12-11T20:52:09,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742238_1414 (size=180) 2024-12-11T20:52:09,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742238_1414 (size=180) 2024-12-11T20:52:09,641 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:52:09,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1}] 2024-12-11T20:52:09,641 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:09,642 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:09,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T20:52:09,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-11T20:52:09,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40355 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-11T20:52:09,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:09,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:09,793 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 2cd958c9ffcd19341df8ec89b3533695 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T20:52:09,793 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing a36e6ed8b0335a0457615a1b86f94ca1 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T20:52:09,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/.tmp/cf/97cd4bd1c26548a3884481e2199addbe is 71, key is 01013c87eac431b73f21c1bb39c9e87b/cf:q/1733950329565/Put/seqid=0 2024-12-11T20:52:09,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/.tmp/cf/5614705cbf80484e8ca6bf48d08e1b80 is 71, key is 10ca6115a1bfead020d282d11c467995/cf:q/1733950329567/Put/seqid=0 2024-12-11T20:52:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742239_1415 (size=5356) 2024-12-11T20:52:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742239_1415 (size=5356) 2024-12-11T20:52:09,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742239_1415 (size=5356) 2024-12-11T20:52:09,816 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/.tmp/cf/97cd4bd1c26548a3884481e2199addbe 2024-12-11T20:52:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742240_1416 (size=8256) 2024-12-11T20:52:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742240_1416 (size=8256) 2024-12-11T20:52:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742240_1416 (size=8256) 2024-12-11T20:52:09,820 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/.tmp/cf/5614705cbf80484e8ca6bf48d08e1b80 2024-12-11T20:52:09,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/.tmp/cf/97cd4bd1c26548a3884481e2199addbe as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe 2024-12-11T20:52:09,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/.tmp/cf/5614705cbf80484e8ca6bf48d08e1b80 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80 2024-12-11T20:52:09,826 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe, entries=4, sequenceid=6, filesize=5.2 K 2024-12-11T20:52:09,826 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2cd958c9ffcd19341df8ec89b3533695 in 33ms, sequenceid=6, compaction requested=false 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 
2cd958c9ffcd19341df8ec89b3533695: 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe] hfiles 2024-12-11T20:52:09,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,828 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80, entries=46, sequenceid=6, filesize=8.1 K 2024-12-11T20:52:09,829 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a36e6ed8b0335a0457615a1b86f94ca1 in 36ms, sequenceid=6, compaction requested=false 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for a36e6ed8b0335a0457615a1b86f94ca1: 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80] hfiles 2024-12-11T20:52:09,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742241_1417 (size=115) 2024-12-11T20:52:09,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742241_1417 (size=115) 2024-12-11T20:52:09,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742241_1417 (size=115) 2024-12-11T20:52:09,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 
2024-12-11T20:52:09,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-11T20:52:09,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-11T20:52:09,839 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:09,840 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:09,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2cd958c9ffcd19341df8ec89b3533695 in 205 msec 2024-12-11T20:52:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742242_1418 (size=115) 2024-12-11T20:52:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742242_1418 (size=115) 2024-12-11T20:52:09,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742242_1418 (size=115) 2024-12-11T20:52:09,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 
2024-12-11T20:52:09,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-11T20:52:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-11T20:52:09,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:09,853 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:09,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-11T20:52:09,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1 in 213 msec 2024-12-11T20:52:09,855 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:52:09,856 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:52:09,856 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:52:09,856 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,857 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742243_1419 (size=645) 2024-12-11T20:52:09,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742243_1419 (size=645) 2024-12-11T20:52:09,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742243_1419 (size=645) 2024-12-11T20:52:09,873 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:52:09,878 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:52:09,878 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,879 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:52:09,879 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-11T20:52:09,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 255 msec 2024-12-11T20:52:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-11T20:52:09,943 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T20:52:09,943 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943 2024-12-11T20:52:09,943 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:09,971 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:09,971 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,972 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:52:09,975 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742244_1420 (size=185) 2024-12-11T20:52:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742244_1420 (size=185) 2024-12-11T20:52:09,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742244_1420 (size=185) 2024-12-11T20:52:09,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742245_1421 (size=567) 2024-12-11T20:52:09,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742245_1421 (size=567) 2024-12-11T20:52:09,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742245_1421 (size=567) 2024-12-11T20:52:09,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:09,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:09,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-17052150800378401585.jar 2024-12-11T20:52:11,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-5459469704762488542.jar 2024-12-11T20:52:11,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,138 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,140 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:11,140 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:52:11,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:52:11,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:52:11,141 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:52:11,142 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:52:11,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:52:11,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:52:11,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:52:11,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:52:11,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:52:11,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:52:11,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:11,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:11,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:11,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:11,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:11,145 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:11,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742246_1422 (size=131440) 2024-12-11T20:52:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742246_1422 (size=131440) 2024-12-11T20:52:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742246_1422 (size=131440) 2024-12-11T20:52:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742247_1423 (size=4188619) 2024-12-11T20:52:11,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742247_1423 (size=4188619) 2024-12-11T20:52:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742247_1423 (size=4188619) 2024-12-11T20:52:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742248_1424 (size=1323991) 2024-12-11T20:52:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742248_1424 (size=1323991) 2024-12-11T20:52:11,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742248_1424 (size=1323991) 2024-12-11T20:52:11,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742249_1425 (size=903544) 2024-12-11T20:52:11,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742249_1425 (size=903544) 2024-12-11T20:52:11,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742249_1425 (size=903544) 2024-12-11T20:52:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742250_1426 (size=8360360) 2024-12-11T20:52:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742250_1426 (size=8360360) 2024-12-11T20:52:11,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742250_1426 (size=8360360) 2024-12-11T20:52:11,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742251_1427 (size=6426228) 2024-12-11T20:52:11,768 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742251_1427 (size=6426228) 2024-12-11T20:52:11,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742251_1427 (size=6426228) 2024-12-11T20:52:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742252_1428 (size=1877034) 2024-12-11T20:52:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742252_1428 (size=1877034) 2024-12-11T20:52:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742252_1428 (size=1877034) 2024-12-11T20:52:11,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742253_1429 (size=77835) 2024-12-11T20:52:11,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742253_1429 (size=77835) 2024-12-11T20:52:11,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742253_1429 (size=77835) 2024-12-11T20:52:11,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742254_1430 (size=30949) 2024-12-11T20:52:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742254_1430 (size=30949) 2024-12-11T20:52:11,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742254_1430 (size=30949) 2024-12-11T20:52:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742255_1431 (size=1597213) 2024-12-11T20:52:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742255_1431 (size=1597213) 2024-12-11T20:52:11,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742255_1431 (size=1597213) 2024-12-11T20:52:11,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742256_1432 (size=4695811) 2024-12-11T20:52:11,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742256_1432 (size=4695811) 2024-12-11T20:52:11,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742256_1432 (size=4695811) 2024-12-11T20:52:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742257_1433 (size=232957) 2024-12-11T20:52:11,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742257_1433 (size=232957) 2024-12-11T20:52:11,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742257_1433 (size=232957) 2024-12-11T20:52:11,838 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742258_1434 (size=127628) 2024-12-11T20:52:11,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742258_1434 (size=127628) 2024-12-11T20:52:11,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742258_1434 (size=127628) 2024-12-11T20:52:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742259_1435 (size=20406) 2024-12-11T20:52:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742259_1435 (size=20406) 2024-12-11T20:52:11,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742259_1435 (size=20406) 2024-12-11T20:52:11,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742260_1436 (size=5175431) 2024-12-11T20:52:11,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742260_1436 (size=5175431) 2024-12-11T20:52:11,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742260_1436 (size=5175431) 2024-12-11T20:52:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742261_1437 (size=217634) 2024-12-11T20:52:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742261_1437 (size=217634) 2024-12-11T20:52:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742261_1437 (size=217634) 2024-12-11T20:52:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742262_1438 (size=1832290) 2024-12-11T20:52:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742262_1438 (size=1832290) 2024-12-11T20:52:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742262_1438 (size=1832290) 2024-12-11T20:52:12,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742263_1439 (size=322274) 2024-12-11T20:52:12,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742263_1439 (size=322274) 2024-12-11T20:52:12,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742263_1439 (size=322274) 2024-12-11T20:52:12,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742264_1440 (size=503880) 2024-12-11T20:52:12,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742264_1440 (size=503880) 2024-12-11T20:52:12,308 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742264_1440 (size=503880) 2024-12-11T20:52:12,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742265_1441 (size=29229) 2024-12-11T20:52:12,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742265_1441 (size=29229) 2024-12-11T20:52:12,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742265_1441 (size=29229) 2024-12-11T20:52:12,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742266_1442 (size=24096) 2024-12-11T20:52:12,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742266_1442 (size=24096) 2024-12-11T20:52:12,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742266_1442 (size=24096) 2024-12-11T20:52:12,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742267_1443 (size=443172) 2024-12-11T20:52:12,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742267_1443 (size=443172) 2024-12-11T20:52:12,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742267_1443 (size=443172) 2024-12-11T20:52:12,717 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:52:12,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742268_1444 (size=111872) 2024-12-11T20:52:12,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742268_1444 (size=111872) 2024-12-11T20:52:12,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742268_1444 (size=111872) 2024-12-11T20:52:12,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742269_1445 (size=45609) 2024-12-11T20:52:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742269_1445 (size=45609) 2024-12-11T20:52:12,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742269_1445 (size=45609) 2024-12-11T20:52:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742270_1446 (size=136454) 2024-12-11T20:52:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742270_1446 (size=136454) 2024-12-11T20:52:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742270_1446 (size=136454) 2024-12-11T20:52:12,816 WARN 
[Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:52:12,818 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-11T20:52:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742271_1447 (size=7) 2024-12-11T20:52:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742271_1447 (size=7) 2024-12-11T20:52:12,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742271_1447 (size=7) 2024-12-11T20:52:12,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742272_1448 (size=10) 2024-12-11T20:52:12,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742272_1448 (size=10) 2024-12-11T20:52:12,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742272_1448 (size=10) 2024-12-11T20:52:12,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742273_1449 (size=303905) 2024-12-11T20:52:12,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742273_1449 (size=303905) 2024-12-11T20:52:12,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742273_1449 (size=303905) 2024-12-11T20:52:12,855 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:52:12,855 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:52:12,951 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0008_000001 (auth:SIMPLE) from 127.0.0.1:45356 2024-12-11T20:52:13,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T20:52:13,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T20:52:13,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T20:52:17,801 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0008_000001 (auth:SIMPLE) from 127.0.0.1:60634 2024-12-11T20:52:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742274_1450 (size=349579) 2024-12-11T20:52:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742274_1450 (size=349579) 2024-12-11T20:52:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742274_1450 (size=349579) 2024-12-11T20:52:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742275_1451 (size=8568) 2024-12-11T20:52:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742275_1451 (size=8568) 2024-12-11T20:52:18,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742275_1451 (size=8568) 2024-12-11T20:52:18,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742276_1452 (size=460) 2024-12-11T20:52:18,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742276_1452 (size=460) 2024-12-11T20:52:18,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742276_1452 (size=460) 2024-12-11T20:52:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742277_1453 (size=8568) 2024-12-11T20:52:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742277_1453 (size=8568) 2024-12-11T20:52:18,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742277_1453 (size=8568) 2024-12-11T20:52:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742278_1454 (size=349579) 2024-12-11T20:52:19,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46705 is added to blk_1073742278_1454 (size=349579) 2024-12-11T20:52:19,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742278_1454 (size=349579) 2024-12-11T20:52:19,476 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:52:20,974 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:52:20,974 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:52:20,985 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:20,985 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:52:20,985 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:52:20,985 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:20,986 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T20:52:20,986 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T20:52:20,986 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:20,986 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T20:52:20,986 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950329943/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T20:52:20,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-11T20:52:20,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:20,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T20:52:20,996 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950340996"}]},"ts":"1733950340996"} 2024-12-11T20:52:20,997 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T20:52:20,997 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-11T20:52:20,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-11T20:52:20,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, UNASSIGN}] 2024-12-11T20:52:21,000 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, UNASSIGN 2024-12-11T20:52:21,000 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, UNASSIGN 2024-12-11T20:52:21,001 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=2cd958c9ffcd19341df8ec89b3533695, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:21,001 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=a36e6ed8b0335a0457615a1b86f94ca1, regionState=CLOSING, regionLocation=5be53b084ac7,40355,1733949994820 2024-12-11T20:52:21,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, UNASSIGN because future has completed 2024-12-11T20:52:21,002 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:52:21,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:52:21,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, UNASSIGN because future has completed 2024-12-11T20:52:21,003 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:52:21,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820}] 2024-12-11T20:52:21,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T20:52:21,157 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:21,157 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:52:21,157 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 2cd958c9ffcd19341df8ec89b3533695, disabling compactions & flushes 2024-12-11T20:52:21,157 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:21,157 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:21,158 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. after waiting 0 ms 2024-12-11T20:52:21,158 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:21,159 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:21,159 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:52:21,159 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing a36e6ed8b0335a0457615a1b86f94ca1, disabling compactions & flushes 2024-12-11T20:52:21,159 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 
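Editor's note: the "BLOCK* addStoredBlock" entries earlier in this section record each block of the exported files reaching three DataNodes (127.0.0.1:46705, :45301, :44295). A minimal sketch, assuming a placeholder file path and reusing the NameNode URI from the log, of how one could confirm a file's replication with the plain Hadoop FileSystem API:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI taken from the log; the file path is a hypothetical placeholder.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36717"), conf);
    Path file = new Path("/user/jenkins/export-test/some-exported-file");
    FileStatus st = fs.getFileStatus(file);
    System.out.println("replication factor: " + st.getReplication());
    // Each BlockLocation lists the DataNodes currently holding a replica,
    // the same information the "BLOCK* addStoredBlock" lines report as blocks land.
    for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getHosts()));
    }
    fs.close();
  }
}
```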
2024-12-11T20:52:21,160 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:21,160 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. after waiting 0 ms 2024-12-11T20:52:21,160 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 2024-12-11T20:52:21,163 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:52:21,163 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:52:21,164 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695. 2024-12-11T20:52:21,164 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 2cd958c9ffcd19341df8ec89b3533695: Waiting for close lock at 1733950341157Running coprocessor pre-close hooks at 1733950341157Disabling compacts and flushes for region at 1733950341157Disabling writes for close at 1733950341158 (+1 ms)Writing region close event to WAL at 1733950341158Running coprocessor post-close hooks at 1733950341163 (+5 ms)Closed at 1733950341164 (+1 ms) 2024-12-11T20:52:21,165 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:52:21,165 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:52:21,165 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1. 
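Editor's note: the "Finalize the Snapshot Export" / "Export Completed" lines above come from the ExportSnapshot tool, and the TestExportSnapshot(495/500) entries enumerate the .snapshotinfo and data.manifest files it produced. A hedged sketch of driving the same tool programmatically and then listing the exported layout; it assumes ExportSnapshot can be run as a Hadoop Tool with the -snapshot/-copy-to options shown in the HBase reference guide, and uses placeholder destinations rather than the test directories from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportAndList {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name is taken from the log; the destination URI is a placeholder.
    // Flag spelling follows the ExportSnapshot usage in the HBase reference guide.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/backup"});
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot exited with " + rc);
    }
    // The exported snapshot directory should contain .snapshotinfo and data.manifest,
    // mirroring the files the test enumerates in the log above.
    Path exported = new Path("hdfs://namenode:8020/backup/.hbase-snapshot");
    FileSystem fs = exported.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}
```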
2024-12-11T20:52:21,165 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for a36e6ed8b0335a0457615a1b86f94ca1: Waiting for close lock at 1733950341159Running coprocessor pre-close hooks at 1733950341159Disabling compacts and flushes for region at 1733950341159Disabling writes for close at 1733950341160 (+1 ms)Writing region close event to WAL at 1733950341160Running coprocessor post-close hooks at 1733950341165 (+5 ms)Closed at 1733950341165 2024-12-11T20:52:21,166 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:21,166 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=2cd958c9ffcd19341df8ec89b3533695, regionState=CLOSED 2024-12-11T20:52:21,167 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:21,168 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=a36e6ed8b0335a0457615a1b86f94ca1, regionState=CLOSED 2024-12-11T20:52:21,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:52:21,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820 because future has completed 2024-12-11T20:52:21,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-11T20:52:21,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 2cd958c9ffcd19341df8ec89b3533695, server=5be53b084ac7,39865,1733949995112 in 167 msec 2024-12-11T20:52:21,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=2cd958c9ffcd19341df8ec89b3533695, UNASSIGN in 173 msec 2024-12-11T20:52:21,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-11T20:52:21,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure a36e6ed8b0335a0457615a1b86f94ca1, server=5be53b084ac7,40355,1733949994820 in 168 msec 2024-12-11T20:52:21,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-11T20:52:21,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=a36e6ed8b0335a0457615a1b86f94ca1, UNASSIGN in 175 msec 2024-12-11T20:52:21,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-11T20:52:21,178 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 178 msec 2024-12-11T20:52:21,180 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950341180"}]},"ts":"1733950341180"} 2024-12-11T20:52:21,182 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T20:52:21,182 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-11T20:52:21,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 191 msec 2024-12-11T20:52:21,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-11T20:52:21,313 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T20:52:21,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,315 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,316 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,318 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,319 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:21,319 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:21,321 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf, FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/recovered.edits] 2024-12-11T20:52:21,321 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/recovered.edits] 2024-12-11T20:52:21,334 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/cf/5614705cbf80484e8ca6bf48d08e1b80 2024-12-11T20:52:21,334 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/cf/97cd4bd1c26548a3884481e2199addbe 2024-12-11T20:52:21,337 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1/recovered.edits/9.seqid 2024-12-11T20:52:21,337 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695/recovered.edits/9.seqid 2024-12-11T20:52:21,338 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/a36e6ed8b0335a0457615a1b86f94ca1 2024-12-11T20:52:21,338 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testEmptyExportFileSystemState/2cd958c9ffcd19341df8ec89b3533695 2024-12-11T20:52:21,338 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-11T20:52:21,341 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,344 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-11T20:52:21,346 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-11T20:52:21,347 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,347 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-11T20:52:21,347 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950341347"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:21,347 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950341347"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:21,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:52:21,349 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2cd958c9ffcd19341df8ec89b3533695, NAME => 'testtb-testEmptyExportFileSystemState,,1733950327751.2cd958c9ffcd19341df8ec89b3533695.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a36e6ed8b0335a0457615a1b86f94ca1, NAME => 'testtb-testEmptyExportFileSystemState,1,1733950327751.a36e6ed8b0335a0457615a1b86f94ca1.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:52:21,349 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
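Editor's note: the DisableTableProcedure (pid=206) and DeleteTableProcedure (pid=212) entries above, together with the SnapshotManager "Deleting snapshot" requests a little further down, are the server side of routine client cleanup. A minimal client-side sketch using the standard HBase Admin API; the table and snapshot names are copied from the log, while connection settings are assumed to come from an hbase-site.xml on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Disable first: a table must be DISABLED before it can be deleted,
      // which is the state transition the DisableTableProcedure log entries record.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
      // Drop the snapshots taken against the table, matching the
      // SnapshotManager "Deleting snapshot" entries in the log.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```

After the delete, the HFileArchiver entries in the log show the region directories being moved under archive/ rather than removed outright, so the store files remain recoverable until the cleaner chores run.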
2024-12-11T20:52:21,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950341349"}]},"ts":"9223372036854775807"} 2024-12-11T20:52:21,351 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-11T20:52:21,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 38 msec 2024-12-11T20:52:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T20:52:21,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T20:52:21,385 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T20:52:21,385 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T20:52:21,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,392 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,392 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,392 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,392 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-11T20:52:21,393 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-11T20:52:21,393 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T20:52:21,399 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T20:52:21,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:21,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T20:52:21,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-11T20:52:21,424 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=814 (was 804) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:54398 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:48668 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:49926 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6692 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:35471 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_922002592_1 at /127.0.0.1:35324 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:39661 from appattempt_1733950008312_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 132128) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 779) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=834 (was 930), ProcessCount=14 (was 14), AvailableMemoryMB=1202 (was 2033) 2024-12-11T20:52:21,424 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-11T20:52:21,443 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=814, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=834, ProcessCount=14, AvailableMemoryMB=1202 2024-12-11T20:52:21,443 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-11T20:52:21,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:52:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:52:21,447 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:52:21,447 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:21,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-11T20:52:21,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T20:52:21,448 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:52:21,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742279_1455 (size=404) 2024-12-11T20:52:21,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742279_1455 (size=404) 2024-12-11T20:52:21,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742279_1455 (size=404) 2024-12-11T20:52:21,471 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 85706d21c97dbe3e5e92bf8ae7406a07, NAME => 'testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 
'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:21,471 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b5b4959b71b91bebe4a19decdaac194a, NAME => 'testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:21,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742280_1456 (size=65) 2024-12-11T20:52:21,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742280_1456 (size=65) 2024-12-11T20:52:21,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742280_1456 (size=65) 2024-12-11T20:52:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742281_1457 (size=65) 2024-12-11T20:52:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742281_1457 (size=65) 2024-12-11T20:52:21,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742281_1457 (size=65) 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 85706d21c97dbe3e5e92bf8ae7406a07, disabling compactions & flushes 2024-12-11T20:52:21,478 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. after waiting 0 ms 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:21,478 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:21,478 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing b5b4959b71b91bebe4a19decdaac194a, disabling compactions & flushes 2024-12-11T20:52:21,479 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 85706d21c97dbe3e5e92bf8ae7406a07: Waiting for close lock at 1733950341478Disabling compacts and flushes for region at 1733950341478Disabling writes for close at 1733950341478Writing region close event to WAL at 1733950341478Closed at 1733950341478 2024-12-11T20:52:21,479 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,479 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,479 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. after waiting 0 ms 2024-12-11T20:52:21,479 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,479 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 
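Editor's note: the CreateTableProcedure (pid=213) entries above show 'testtb-testExportWithChecksum' being created with a single 'cf' family, REGION_REPLICATION => '1', and two regions split at row key '1'. A sketch of the equivalent client call; it only sets the attributes visible in the log (everything else is left at defaults) and assumes default connection configuration:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)                      // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                        // VERSIONS => '1'
            .setBlocksize(65536)                      // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    // Splitting at "1" yields the two regions seen in the log:
    // one with keys ['', '1') and one with ['1', '').
    byte[][] splits = new byte[][] { Bytes.toBytes("1") };
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc, splits);
    }
  }
}
```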
2024-12-11T20:52:21,479 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for b5b4959b71b91bebe4a19decdaac194a: Waiting for close lock at 1733950341478Disabling compacts and flushes for region at 1733950341478Disabling writes for close at 1733950341479 (+1 ms)Writing region close event to WAL at 1733950341479Closed at 1733950341479 2024-12-11T20:52:21,480 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:52:21,480 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950341480"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950341480"}]},"ts":"1733950341480"} 2024-12-11T20:52:21,480 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733950341480"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950341480"}]},"ts":"1733950341480"} 2024-12-11T20:52:21,482 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:52:21,483 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:52:21,484 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950341484"}]},"ts":"1733950341484"} 2024-12-11T20:52:21,485 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-11T20:52:21,486 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:52:21,487 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:52:21,487 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:52:21,487 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:52:21,487 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:52:21,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, ASSIGN}] 2024-12-11T20:52:21,489 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, ASSIGN 2024-12-11T20:52:21,489 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, ASSIGN 2024-12-11T20:52:21,490 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:52:21,490 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:52:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T20:52:21,640 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
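Editor's note: the ASSIGN procedures above and the balancer's "Reassigned 2 regions" line place the two new regions on specific region servers, and the OpenRegionProcedure entries that follow open them there. A small sketch, assuming default connection configuration, of how a client could confirm where each region landed once the table is online; the output roughly corresponds to the regionLocation values written to hbase:meta in the log:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // One line per region: encoded name, key range, and hosting region server.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
            + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ")"
            + " on " + loc.getServerName());
      }
    }
  }
}
```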
2024-12-11T20:52:21,640 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=b5b4959b71b91bebe4a19decdaac194a, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:52:21,640 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=85706d21c97dbe3e5e92bf8ae7406a07, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:21,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, ASSIGN because future has completed 2024-12-11T20:52:21,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:52:21,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, ASSIGN because future has completed 2024-12-11T20:52:21,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:52:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T20:52:21,798 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:21,798 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,798 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => b5b4959b71b91bebe4a19decdaac194a, NAME => 'testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:52:21,798 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 85706d21c97dbe3e5e92bf8ae7406a07, NAME => 'testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:52:21,798 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. service=AccessControlService 2024-12-11T20:52:21,798 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
service=AccessControlService 2024-12-11T20:52:21,799 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:52:21,799 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,799 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,802 INFO [StoreOpener-85706d21c97dbe3e5e92bf8ae7406a07-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,804 INFO [StoreOpener-b5b4959b71b91bebe4a19decdaac194a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,804 INFO [StoreOpener-85706d21c97dbe3e5e92bf8ae7406a07-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 85706d21c97dbe3e5e92bf8ae7406a07 columnFamilyName cf 2024-12-11T20:52:21,805 DEBUG [StoreOpener-85706d21c97dbe3e5e92bf8ae7406a07-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:21,805 INFO [StoreOpener-85706d21c97dbe3e5e92bf8ae7406a07-1 {}] regionserver.HStore(327): Store=85706d21c97dbe3e5e92bf8ae7406a07/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:52:21,805 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,805 INFO [StoreOpener-b5b4959b71b91bebe4a19decdaac194a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b5b4959b71b91bebe4a19decdaac194a columnFamilyName cf 2024-12-11T20:52:21,805 DEBUG [StoreOpener-b5b4959b71b91bebe4a19decdaac194a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:52:21,808 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,808 INFO [StoreOpener-b5b4959b71b91bebe4a19decdaac194a-1 {}] regionserver.HStore(327): Store=b5b4959b71b91bebe4a19decdaac194a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:52:21,808 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,808 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,809 DEBUG 
[RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,809 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,810 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,813 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,815 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:52:21,816 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:52:21,816 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 85706d21c97dbe3e5e92bf8ae7406a07; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67219380, jitterRate=0.0016468167304992676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:52:21,816 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:21,816 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened b5b4959b71b91bebe4a19decdaac194a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60627377, jitterRate=-0.09658168256282806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:52:21,816 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:21,817 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for b5b4959b71b91bebe4a19decdaac194a: Running coprocessor pre-open hook at 1733950341799Writing region info on filesystem at 1733950341799Initializing all the Stores at 1733950341804 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950341804Cleaning up temporary data from old regions at 1733950341809 (+5 ms)Running coprocessor post-open hooks at 1733950341816 (+7 ms)Region opened successfully at 1733950341817 (+1 ms) 2024-12-11T20:52:21,817 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 85706d21c97dbe3e5e92bf8ae7406a07: Running coprocessor pre-open hook at 1733950341799Writing region info on filesystem at 1733950341799Initializing all the Stores at 1733950341800 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950341800Cleaning up temporary data from old regions at 1733950341809 (+9 ms)Running coprocessor post-open hooks at 1733950341816 (+7 ms)Region opened successfully at 1733950341817 (+1 ms) 2024-12-11T20:52:21,818 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07., pid=217, masterSystemTime=1733950341794 2024-12-11T20:52:21,819 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., pid=216, masterSystemTime=1733950341794 2024-12-11T20:52:21,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:21,820 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
2024-12-11T20:52:21,820 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=85706d21c97dbe3e5e92bf8ae7406a07, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:52:21,820 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,820 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:21,823 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=b5b4959b71b91bebe4a19decdaac194a, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:52:21,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:52:21,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:52:21,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-11T20:52:21,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112 in 185 msec 2024-12-11T20:52:21,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-11T20:52:21,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244 in 186 msec 2024-12-11T20:52:21,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, ASSIGN in 343 msec 2024-12-11T20:52:21,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-11T20:52:21,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, ASSIGN in 343 msec 2024-12-11T20:52:21,833 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:52:21,833 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950341833"}]},"ts":"1733950341833"} 2024-12-11T20:52:21,837 INFO 
[PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-11T20:52:21,838 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:52:21,838 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-11T20:52:21,841 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T20:52:21,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:52:21,896 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,896 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,896 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,897 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T20:52:21,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 450 msec 2024-12-11T20:52:22,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-11T20:52:22,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T20:52:22,073 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-11T20:52:22,074 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:22,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-11T20:52:22,079 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-11T20:52:22,080 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:22,080 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-11T20:52:22,080 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T20:52:22,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T20:52:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950342083 (current time:1733950342083). 
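The entries above trace CreateTableProcedure pid=213 to completion, the test harness waiting for both regions of testtb-testExportWithChecksum to be assigned, and the master accepting a FLUSH snapshot request for emptySnaptb0-testExportWithChecksum. A minimal client-side sketch that would drive this same sequence through the public Admin API is shown below; the table name, snapshot name, family 'cf' and the single split key '1' are taken from the log, while the configuration and connection setup are assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // Two regions with boundary '1', matching STARTKEY ''..'1' and '1'..'' in the open-region entries.
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              new byte[][] { Bytes.toBytes("1") });
          // FLUSH-type snapshot of the still-empty table, as requested at 20:52:22,083.
          admin.snapshot("emptySnaptb0-testExportWithChecksum", table, SnapshotType.FLUSH);
        }
      }
    }

createTable and snapshot only return once the corresponding master procedure finishes, which is what the repeated "Checking to see if procedure is done pid=213" polling above corresponds to.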
2024-12-11T20:52:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:52:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T20:52:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:52:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fc1cc3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:22,087 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:22,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:22,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:22,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c259cca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:22,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:22,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,089 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:22,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36fc6111, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:22,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:22,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,092 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:22,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:52:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,094 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:52:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dc0f46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:22,095 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:22,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:22,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:22,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4503c66c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:22,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:22,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,097 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:22,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c77d609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:22,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:22,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,100 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:52:22,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:22,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,103 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:22,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779. 2024-12-11T20:52:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:22,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T20:52:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:52:22,105 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:52:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T20:52:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-11T20:52:22,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T20:52:22,107 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:52:22,109 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:52:22,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:52:22,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742282_1458 (size=161) 2024-12-11T20:52:22,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742282_1458 (size=161) 2024-12-11T20:52:22,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742282_1458 (size=161) 2024-12-11T20:52:22,131 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:52:22,131 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a}] 2024-12-11T20:52:22,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,133 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T20:52:22,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-11T20:52:22,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 85706d21c97dbe3e5e92bf8ae7406a07: 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for b5b4959b71b91bebe4a19decdaac194a: 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:52:22,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:52:22,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742284_1460 (size=68) 2024-12-11T20:52:22,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742283_1459 (size=68) 2024-12-11T20:52:22,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742284_1460 (size=68) 2024-12-11T20:52:22,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742284_1460 (size=68) 2024-12-11T20:52:22,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:22,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-11T20:52:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742283_1459 (size=68) 2024-12-11T20:52:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742283_1459 (size=68) 2024-12-11T20:52:22,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-11T20:52:22,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,317 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
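At this point SnapshotRegionProcedure pids 219/220 have written per-region manifests (with empty hfile reference lists, since the table holds no data yet). If one wanted to confirm from the client side that the snapshot was registered, Admin.listSnapshots is the usual call; a small sketch, assuming an already-open Admin handle:

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    final class SnapshotListing {
      // Print every snapshot whose name ends with the suffix used by this test run.
      static void printChecksumTestSnapshots(Admin admin) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile(".*testExportWithChecksum"))) {
          System.out.println(sd.getName()); // e.g. emptySnaptb0-testExportWithChecksum
        }
      }
    }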
2024-12-11T20:52:22,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-11T20:52:22,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-11T20:52:22,319 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a in 187 msec 2024-12-11T20:52:22,321 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-12-11T20:52:22,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 in 191 msec 2024-12-11T20:52:22,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:52:22,326 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:52:22,327 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:52:22,328 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-11T20:52:22,328 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-11T20:52:22,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742285_1461 (size=543) 2024-12-11T20:52:22,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742285_1461 (size=543) 2024-12-11T20:52:22,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742285_1461 (size=543) 2024-12-11T20:52:22,372 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:52:22,377 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:52:22,377 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-11T20:52:22,378 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:52:22,378 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-11T20:52:22,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 273 msec 2024-12-11T20:52:22,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T20:52:22,423 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T20:52:22,431 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0349702ec90bfd57da1702c8498803308', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:22,437 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='112f112a882d4fe1db8ce23343978ac41', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,438 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3b2347e60f11dbe416b1ca2671dbb3a29', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,439 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', 
row='4d0d190b650ba0a409083a954996337f7', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,443 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:52:22,444 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='213b827055aea045dc160559436da63ac', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,445 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='65fb59075d04eb586e1917b4bede9b646', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,446 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='58788355b451edf9cfd6c79cf33aee436', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,450 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3bc792d53d87645dd9c90e74d8af69e7', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,451 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4dfc0b87f6676dae6395e3018d09d108', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:52:22,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:52:22,461 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T20:52:22,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-11T20:52:22,464 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
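The region-locator lookups above are immediately followed by "writing data to region ... with WAL disabled" on both region servers, i.e. the test loads rows using SKIP_WAL durability before taking the second snapshot. A hedged sketch of that load path through the Table API follows; the row keys and values here are illustrative (the log's actual keys are randomized), and conn is assumed to be an open Connection.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TestDataLoader {
      // Write a handful of rows into family 'cf', skipping the WAL as the log entries indicate.
      static void loadRows(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          for (int i = 0; i < 50; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row-%03d", i)));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            put.setDurability(Durability.SKIP_WAL); // triggers the "Data may be lost" warning seen above
            table.put(put);
          }
        }
      }
    }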
2024-12-11T20:52:22,465 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:52:22,466 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T20:52:22,472 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T20:52:22,478 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T20:52:22,481 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T20:52:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950342481 (current time:1733950342481). 2024-12-11T20:52:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:52:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T20:52:22,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:52:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6283c72f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:22,483 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:22,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:22,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:22,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ebb50b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T20:52:22,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:22,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:22,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,484 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54930, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:22,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718d6118, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:22,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:22,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:22,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:52:22,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:22,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,489 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:52:22,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@554a1c8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:52:22,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:52:22,491 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1433694b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:52:22,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,493 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:52:22,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f00558, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:52:22,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:52:22,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:52:22,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:22,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:52:22,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:52:22,499 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:52:22,500 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 
2024-12-11T20:52:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:52:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:52:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T20:52:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:52:22,502 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:52:22,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T20:52:22,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-11T20:52:22,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T20:52:22,511 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:52:22,512 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:52:22,514 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:52:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742286_1462 (size=156) 2024-12-11T20:52:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742286_1462 (size=156) 2024-12-11T20:52:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742286_1462 (size=156) 2024-12-11T20:52:22,527 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:52:22,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a}] 2024-12-11T20:52:22,528 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,528 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T20:52:22,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-11T20:52:22,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-11T20:52:22,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:52:22,681 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 85706d21c97dbe3e5e92bf8ae7406a07 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T20:52:22,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:52:22,683 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing b5b4959b71b91bebe4a19decdaac194a 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T20:52:22,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/.tmp/cf/359d2fa4b52b46668f145ebe04a870f8 is 71, key is 078428a35d4e1accf7572692cfe51087/cf:q/1733950342443/Put/seqid=0 2024-12-11T20:52:22,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/.tmp/cf/3b6247262ece4e749b0a7bd171a69454 is 71, key is 1204a00ec29fb36607d41f91e3a0fa0b/cf:q/1733950342455/Put/seqid=0 2024-12-11T20:52:22,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742287_1463 (size=5356) 2024-12-11T20:52:22,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742287_1463 (size=5356) 2024-12-11T20:52:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742287_1463 (size=5356) 2024-12-11T20:52:22,744 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/.tmp/cf/359d2fa4b52b46668f145ebe04a870f8 2024-12-11T20:52:22,751 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/.tmp/cf/359d2fa4b52b46668f145ebe04a870f8 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 2024-12-11T20:52:22,768 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8, entries=4, sequenceid=6, filesize=5.2 K 2024-12-11T20:52:22,769 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 85706d21c97dbe3e5e92bf8ae7406a07 in 88ms, sequenceid=6, compaction requested=false 2024-12-11T20:52:22,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 85706d21c97dbe3e5e92bf8ae7406a07: 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. for snaptb0-testExportWithChecksum completed. 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8] hfiles 2024-12-11T20:52:22,770 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T20:52:22,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742288_1464 (size=8258) 2024-12-11T20:52:22,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742288_1464 (size=8258) 2024-12-11T20:52:22,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742288_1464 (size=8258) 2024-12-11T20:52:22,789 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/.tmp/cf/3b6247262ece4e749b0a7bd171a69454 2024-12-11T20:52:22,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/.tmp/cf/3b6247262ece4e749b0a7bd171a69454 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 2024-12-11T20:52:22,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454, entries=46, sequenceid=6, filesize=8.1 K 2024-12-11T20:52:22,813 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for b5b4959b71b91bebe4a19decdaac194a in 130ms, sequenceid=6, compaction requested=false 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 
b5b4959b71b91bebe4a19decdaac194a: 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. for snaptb0-testExportWithChecksum completed. 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454] hfiles 2024-12-11T20:52:22,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T20:52:22,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T20:52:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742289_1465 (size=107) 2024-12-11T20:52:22,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742289_1465 (size=107) 2024-12-11T20:52:22,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742289_1465 (size=107) 2024-12-11T20:52:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
2024-12-11T20:52:22,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-11T20:52:22,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-11T20:52:22,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:52:22,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07 in 321 msec 2024-12-11T20:52:22,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742290_1466 (size=107) 2024-12-11T20:52:22,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742290_1466 (size=107) 2024-12-11T20:52:22,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742290_1466 (size=107) 2024-12-11T20:52:22,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 
2024-12-11T20:52:22,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-11T20:52:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-11T20:52:22,872 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,872 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:52:22,878 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-12-11T20:52:22,878 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:52:22,878 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b5b4959b71b91bebe4a19decdaac194a in 347 msec 2024-12-11T20:52:22,879 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:52:22,880 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:52:22,880 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-11T20:52:22,881 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T20:52:22,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742291_1467 (size=621) 2024-12-11T20:52:22,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742291_1467 (size=621) 2024-12-11T20:52:22,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742291_1467 (size=621) 2024-12-11T20:52:22,949 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:52:22,963 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:52:22,964 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T20:52:22,966 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:52:22,966 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-11T20:52:22,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 465 msec 2024-12-11T20:52:23,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-11T20:52:23,133 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T20:52:23,133 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133 2024-12-11T20:52:23,133 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:23,175 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:52:23,175 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@25b57f86, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T20:52:23,176 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:52:23,180 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T20:52:23,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:23,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:23,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:23,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T20:52:23,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-11T20:52:23,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T20:52:24,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-5959783716645719459.jar 2024-12-11T20:52:24,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-568801278302599745.jar 2024-12-11T20:52:24,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:52:24,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:52:24,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:52:24,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:52:24,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:52:24,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:52:24,394 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:52:24,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:52:24,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:52:24,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:52:24,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:52:24,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:52:24,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:24,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:24,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:24,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:24,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:52:24,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:24,396 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:52:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742292_1468 (size=131440) 2024-12-11T20:52:24,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742292_1468 (size=131440) 2024-12-11T20:52:24,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742292_1468 (size=131440) 2024-12-11T20:52:24,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742293_1469 (size=4188619) 2024-12-11T20:52:24,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742293_1469 (size=4188619) 2024-12-11T20:52:24,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742293_1469 (size=4188619) 2024-12-11T20:52:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742294_1470 (size=1323991) 2024-12-11T20:52:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742294_1470 (size=1323991) 2024-12-11T20:52:24,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742294_1470 (size=1323991) 2024-12-11T20:52:24,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742295_1471 (size=903544) 2024-12-11T20:52:24,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742295_1471 (size=903544) 2024-12-11T20:52:24,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742295_1471 (size=903544) 2024-12-11T20:52:24,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742296_1472 (size=8360360) 2024-12-11T20:52:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742296_1472 (size=8360360) 2024-12-11T20:52:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742296_1472 (size=8360360) 2024-12-11T20:52:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742297_1473 (size=1877034) 2024-12-11T20:52:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742297_1473 (size=1877034) 2024-12-11T20:52:24,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742297_1473 (size=1877034) 2024-12-11T20:52:24,685 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742298_1474 (size=443172) 2024-12-11T20:52:24,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742298_1474 (size=443172) 2024-12-11T20:52:24,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742298_1474 (size=443172) 2024-12-11T20:52:24,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742299_1475 (size=77835) 2024-12-11T20:52:24,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742299_1475 (size=77835) 2024-12-11T20:52:24,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742299_1475 (size=77835) 2024-12-11T20:52:24,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742300_1476 (size=30949) 2024-12-11T20:52:24,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742300_1476 (size=30949) 2024-12-11T20:52:24,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742300_1476 (size=30949) 2024-12-11T20:52:24,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742301_1477 (size=1597213) 2024-12-11T20:52:24,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742301_1477 (size=1597213) 2024-12-11T20:52:24,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742301_1477 (size=1597213) 2024-12-11T20:52:25,130 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0008/container_1733950008312_0008_01_000001/launch_container.sh] 2024-12-11T20:52:25,130 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0008/container_1733950008312_0008_01_000001/container_tokens] 2024-12-11T20:52:25,130 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0008/container_1733950008312_0008_01_000001/sysfs] 2024-12-11T20:52:25,132 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0008_000001 (auth:SIMPLE) from 127.0.0.1:49986 
2024-12-11T20:52:25,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742302_1478 (size=6426228) 2024-12-11T20:52:25,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742302_1478 (size=6426228) 2024-12-11T20:52:25,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742302_1478 (size=6426228) 2024-12-11T20:52:25,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742303_1479 (size=4695811) 2024-12-11T20:52:25,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742303_1479 (size=4695811) 2024-12-11T20:52:25,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742303_1479 (size=4695811) 2024-12-11T20:52:25,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742304_1480 (size=232957) 2024-12-11T20:52:25,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742304_1480 (size=232957) 2024-12-11T20:52:25,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742304_1480 (size=232957) 2024-12-11T20:52:25,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742305_1481 (size=127628) 2024-12-11T20:52:25,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742305_1481 (size=127628) 2024-12-11T20:52:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742305_1481 (size=127628) 2024-12-11T20:52:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742306_1482 (size=20406) 2024-12-11T20:52:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742306_1482 (size=20406) 2024-12-11T20:52:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742306_1482 (size=20406) 2024-12-11T20:52:25,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742307_1483 (size=5175431) 2024-12-11T20:52:25,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742307_1483 (size=5175431) 2024-12-11T20:52:25,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742307_1483 (size=5175431) 2024-12-11T20:52:25,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742308_1484 (size=217634) 2024-12-11T20:52:25,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742308_1484 
(size=217634) 2024-12-11T20:52:25,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742308_1484 (size=217634) 2024-12-11T20:52:25,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742309_1485 (size=1832290) 2024-12-11T20:52:25,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742309_1485 (size=1832290) 2024-12-11T20:52:25,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742309_1485 (size=1832290) 2024-12-11T20:52:25,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742310_1486 (size=322274) 2024-12-11T20:52:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742310_1486 (size=322274) 2024-12-11T20:52:25,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742310_1486 (size=322274) 2024-12-11T20:52:25,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742311_1487 (size=503880) 2024-12-11T20:52:25,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742311_1487 (size=503880) 2024-12-11T20:52:25,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742311_1487 (size=503880) 2024-12-11T20:52:25,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742312_1488 (size=29229) 2024-12-11T20:52:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742312_1488 (size=29229) 2024-12-11T20:52:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742312_1488 (size=29229) 2024-12-11T20:52:25,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742313_1489 (size=24096) 2024-12-11T20:52:25,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742313_1489 (size=24096) 2024-12-11T20:52:25,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742313_1489 (size=24096) 2024-12-11T20:52:25,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742314_1490 (size=111872) 2024-12-11T20:52:25,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742314_1490 (size=111872) 2024-12-11T20:52:25,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742314_1490 (size=111872) 2024-12-11T20:52:25,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to 
blk_1073742315_1491 (size=45609) 2024-12-11T20:52:25,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742315_1491 (size=45609) 2024-12-11T20:52:25,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742315_1491 (size=45609) 2024-12-11T20:52:25,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742316_1492 (size=136454) 2024-12-11T20:52:25,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742316_1492 (size=136454) 2024-12-11T20:52:25,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742316_1492 (size=136454) 2024-12-11T20:52:25,488 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:52:25,493 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T20:52:25,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-11T20:52:25,495 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-11T20:52:25,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742317_1493 (size=441) 2024-12-11T20:52:25,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742317_1493 (size=441) 2024-12-11T20:52:25,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742317_1493 (size=441) 2024-12-11T20:52:25,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742318_1494 (size=21) 2024-12-11T20:52:25,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742318_1494 (size=21) 2024-12-11T20:52:25,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742318_1494 (size=21) 2024-12-11T20:52:25,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742319_1495 (size=304042) 2024-12-11T20:52:25,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742319_1495 (size=304042) 2024-12-11T20:52:25,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742319_1495 (size=304042) 2024-12-11T20:52:25,581 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:52:25,581 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:52:25,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:44206 2024-12-11T20:52:26,669 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:52:31,527 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:55346 2024-12-11T20:52:31,789 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:52:31,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742320_1496 (size=349740) 2024-12-11T20:52:31,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742320_1496 (size=349740) 2024-12-11T20:52:31,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742320_1496 (size=349740) 2024-12-11T20:52:33,725 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:39202 2024-12-11T20:52:33,725 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:36454 2024-12-11T20:52:38,916 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000002/launch_container.sh] 2024-12-11T20:52:38,916 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000002/container_tokens] 2024-12-11T20:52:38,917 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_0/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454. 
Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T20:52:40,110 WARN [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-12-11T20:52:40,132 WARN [regionserver/5be53b084ac7:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 0 2024-12-11T20:52:40,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 58264c5b234b4bd11beed47f41492f9c, had cached 0 bytes from a total of 8188 2024-12-11T20:52:40,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 08787acc4c8783f43f2b29bbed40a0de, had cached 0 bytes from a total of 5422 2024-12-11T20:52:40,644 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:36462 2024-12-11T20:52:40,649 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:39214 2024-12-11T20:52:41,985 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 58264c5b234b4bd11beed47f41492f9c changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:52:41,986 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 85706d21c97dbe3e5e92bf8ae7406a07 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:52:41,986 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 08787acc4c8783f43f2b29bbed40a0de changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:52:41,986 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b5b4959b71b91bebe4a19decdaac194a changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:52:44,797 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000003/launch_container.sh] 2024-12-11T20:52:44,797 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000003/container_tokens] 2024-12-11T20:52:44,797 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T20:52:45,938 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000005/launch_container.sh] 2024-12-11T20:52:45,938 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000005/container_tokens] 2024-12-11T20:52:45,938 WARN 
[ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000005/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T20:52:47,714 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:44148 2024-12-11T20:52:47,732 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:49014 2024-12-11T20:52:50,984 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000004/launch_container.sh] 2024-12-11T20:52:50,984 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000004/container_tokens] 
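The repeated java.io.IOException above comes from exporting the snapshot from HDFS to a local filesystem, and the error text itself names the two remedies: file-level checksum validation via dfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. A minimal sketch of a re-run with the first remedy is below; it assumes ExportSnapshot's documented --snapshot/--copy-to arguments, only the snapshot name is taken from this log, and the local destination path is hypothetical.

// Hypothetical re-run of the failing export, using the remedies named in the error text above.
// Option spellings should be checked against ExportSnapshot's usage output; this is a sketch,
// not the test's actual invocation.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithCompositeCrc {
  public static void main(String[] cliArgs) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum validation that works across block sizes / filesystem types,
    // per the "-Ddfs.checksum.combine.mode=COMPOSITE_CRC" hint in the message above.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    String[] args = {
        "--snapshot", "snaptb0-testExportWithChecksum",  // snapshot name from this log
        "--copy-to", "file:///tmp/local-export"          // hypothetical local destination
        // Alternatively, skip verification entirely (risks masking corruption in transit):
        // add "--no-checksum-verify" (spelled -no-checksum-verify in the message above)
    };
    // ExportSnapshot extends AbstractHBaseTool, which implements Tool, so ToolRunner can drive it.
    System.exit(ToolRunner.run(conf, new ExportSnapshot(), args));
  }
}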
2024-12-11T20:52:50,984 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000004/sysfs] 2024-12-11T20:52:57,048 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000007/launch_container.sh] 2024-12-11T20:52:57,048 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000007/container_tokens] 2024-12-11T20:52:57,048 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000007/sysfs] 2024-12-11T20:52:57,061 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000006/launch_container.sh] 2024-12-11T20:52:57,061 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000006/container_tokens] 2024-12-11T20:52:57,061 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000006/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454. Input and output filesystems are of different types. 
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/local-export-1733950343133/archive/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T20:52:58,748 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:54792 2024-12-11T20:52:58,761 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:54052 2024-12-11T20:53:01,790 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:53:06,799 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b5b4959b71b91bebe4a19decdaac194a, had cached 0 bytes from a total of 8258 2024-12-11T20:53:06,799 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 85706d21c97dbe3e5e92bf8ae7406a07, had cached 0 bytes from a total of 5356 2024-12-11T20:53:10,269 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:41304 2024-12-11T20:53:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742321_1497 (size=30388) 2024-12-11T20:53:10,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742321_1497 (size=30388) 2024-12-11T20:53:10,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742321_1497 (size=30388) 2024-12-11T20:53:10,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742322_1498 (size=460) 2024-12-11T20:53:10,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742322_1498 (size=460) 2024-12-11T20:53:10,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742322_1498 (size=460) 2024-12-11T20:53:10,475 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733950008312_0009_01_000008 is : 143 2024-12-11T20:53:10,539 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000008/launch_container.sh]
2024-12-11T20:53:10,540 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000008/container_tokens]
2024-12-11T20:53:10,540 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_1/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000008/sysfs]
2024-12-11T20:53:11,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742323_1499 (size=30388)
2024-12-11T20:53:11,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742323_1499 (size=30388)
2024-12-11T20:53:11,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742323_1499 (size=30388)
2024-12-11T20:53:11,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742324_1500 (size=349740)
2024-12-11T20:53:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742324_1500 (size=349740)
2024-12-11T20:53:11,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742324_1500 (size=349740)
2024-12-11T20:53:13,056 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733950008312_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
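The ExportSnapshotException above is the job-level result of the per-mapper checksum mismatches: the export verifies each copied file by comparing a file-level checksum of the source (hdfs://) and the target (file:/), and checksums from different filesystem types need not be comparable. A minimal sketch of that comparison idea using Hadoop's public FileSystem API follows; it is illustrative only, not the actual ExportSnapshot$ExportMapper.verifyCopyResult code.

// Sketch of a file-level checksum comparison across two filesystems (illustrative only).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ChecksumCompareSketch {
  /** True only when both filesystems expose a checksum and the two checksums match. */
  static boolean sameFileChecksum(Configuration conf, Path src, Path dst) throws IOException {
    FileSystem srcFs = src.getFileSystem(conf);
    FileSystem dstFs = dst.getFileSystem(conf);
    FileChecksum a = srcFs.getFileChecksum(src);  // e.g. HDFS's MD5-of-CRC style checksum
    FileChecksum b = dstFs.getFileChecksum(dst);  // a local fs may return null or a different algorithm
    if (a == null || b == null) {
      return false;  // not comparable: the "different filesystem types" case in the errors above
    }
    return a.equals(b);  // FileChecksum equality covers algorithm and checksum bytes
  }
}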
2024-12-11T20:53:13,057 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057 2024-12-11T20:53:13,058 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:13,178 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:13,178 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T20:53:13,218 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:53:13,287 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T20:53:13,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742325_1501 (size=156) 2024-12-11T20:53:13,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742325_1501 (size=156) 2024-12-11T20:53:13,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742325_1501 (size=156) 2024-12-11T20:53:13,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742326_1502 (size=621) 2024-12-11T20:53:13,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742326_1502 (size=621) 2024-12-11T20:53:13,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742326_1502 (size=621) 2024-12-11T20:53:13,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:13,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:13,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,716 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000009/launch_container.sh] 2024-12-11T20:53:15,716 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000009/container_tokens] 2024-12-11T20:53:15,717 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000009/sysfs] 2024-12-11T20:53:15,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-8710854251784919882.jar 2024-12-11T20:53:15,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-3650306004375911493.jar 2024-12-11T20:53:15,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:15,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:53:15,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:53:15,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:53:15,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T20:53:15,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:53:15,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:53:15,928 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:53:15,929 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:53:15,931 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:53:15,931 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:53:15,932 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:53:15,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:15,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:15,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:15,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:15,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:15,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:15,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:16,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742327_1503 (size=131440) 2024-12-11T20:53:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742327_1503 (size=131440) 2024-12-11T20:53:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742327_1503 
(size=131440) 2024-12-11T20:53:16,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742328_1504 (size=4188619) 2024-12-11T20:53:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742328_1504 (size=4188619) 2024-12-11T20:53:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742328_1504 (size=4188619) 2024-12-11T20:53:16,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742329_1505 (size=1323991) 2024-12-11T20:53:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742329_1505 (size=1323991) 2024-12-11T20:53:16,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742329_1505 (size=1323991) 2024-12-11T20:53:16,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742330_1506 (size=903544) 2024-12-11T20:53:16,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742330_1506 (size=903544) 2024-12-11T20:53:16,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742330_1506 (size=903544) 2024-12-11T20:53:17,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742331_1507 (size=8360360) 2024-12-11T20:53:17,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742331_1507 (size=8360360) 2024-12-11T20:53:17,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742331_1507 (size=8360360) 2024-12-11T20:53:17,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742332_1508 (size=443172) 2024-12-11T20:53:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742332_1508 (size=443172) 2024-12-11T20:53:17,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742332_1508 (size=443172) 2024-12-11T20:53:17,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742333_1509 (size=1877034) 2024-12-11T20:53:17,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742333_1509 (size=1877034) 2024-12-11T20:53:17,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742333_1509 (size=1877034) 2024-12-11T20:53:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742334_1510 (size=77835) 2024-12-11T20:53:17,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to 
blk_1073742334_1510 (size=77835) 2024-12-11T20:53:17,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742334_1510 (size=77835) 2024-12-11T20:53:17,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742335_1511 (size=30949) 2024-12-11T20:53:17,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742335_1511 (size=30949) 2024-12-11T20:53:17,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742335_1511 (size=30949) 2024-12-11T20:53:17,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742336_1512 (size=1597213) 2024-12-11T20:53:17,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742336_1512 (size=1597213) 2024-12-11T20:53:17,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742336_1512 (size=1597213) 2024-12-11T20:53:17,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742337_1513 (size=4695811) 2024-12-11T20:53:17,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742337_1513 (size=4695811) 2024-12-11T20:53:17,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742337_1513 (size=4695811) 2024-12-11T20:53:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742338_1514 (size=232957) 2024-12-11T20:53:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742338_1514 (size=232957) 2024-12-11T20:53:17,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742338_1514 (size=232957) 2024-12-11T20:53:17,520 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0009_000001 (auth:SIMPLE) from 127.0.0.1:43462 2024-12-11T20:53:17,545 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000001/launch_container.sh] 2024-12-11T20:53:17,545 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000001/container_tokens] 2024-12-11T20:53:17,545 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_2/usercache/jenkins/appcache/application_1733950008312_0009/container_1733950008312_0009_01_000001/sysfs] 2024-12-11T20:53:17,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742339_1515 (size=127628) 2024-12-11T20:53:17,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742339_1515 (size=127628) 2024-12-11T20:53:17,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742339_1515 (size=127628) 2024-12-11T20:53:17,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742340_1516 (size=20406) 2024-12-11T20:53:17,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742340_1516 (size=20406) 2024-12-11T20:53:17,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742340_1516 (size=20406) 2024-12-11T20:53:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742341_1517 (size=6426228) 2024-12-11T20:53:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742341_1517 (size=6426228) 2024-12-11T20:53:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742341_1517 (size=6426228) 2024-12-11T20:53:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742342_1518 (size=5175431) 2024-12-11T20:53:17,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742342_1518 (size=5175431) 2024-12-11T20:53:17,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742342_1518 (size=5175431) 2024-12-11T20:53:17,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742343_1519 (size=217634) 2024-12-11T20:53:17,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742343_1519 (size=217634) 2024-12-11T20:53:17,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742343_1519 (size=217634) 2024-12-11T20:53:17,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742344_1520 (size=1832290) 2024-12-11T20:53:17,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742344_1520 (size=1832290) 2024-12-11T20:53:17,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742344_1520 (size=1832290) 2024-12-11T20:53:17,808 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742345_1521 (size=322274) 2024-12-11T20:53:17,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742345_1521 (size=322274) 2024-12-11T20:53:17,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742345_1521 (size=322274) 2024-12-11T20:53:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742346_1522 (size=503880) 2024-12-11T20:53:17,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742346_1522 (size=503880) 2024-12-11T20:53:17,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742346_1522 (size=503880) 2024-12-11T20:53:17,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742347_1523 (size=29229) 2024-12-11T20:53:17,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742347_1523 (size=29229) 2024-12-11T20:53:17,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742347_1523 (size=29229) 2024-12-11T20:53:18,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742348_1524 (size=24096) 2024-12-11T20:53:18,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742348_1524 (size=24096) 2024-12-11T20:53:18,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742348_1524 (size=24096) 2024-12-11T20:53:18,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742349_1525 (size=111872) 2024-12-11T20:53:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742349_1525 (size=111872) 2024-12-11T20:53:18,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742349_1525 (size=111872) 2024-12-11T20:53:18,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742350_1526 (size=45609) 2024-12-11T20:53:18,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742350_1526 (size=45609) 2024-12-11T20:53:18,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742350_1526 (size=45609) 2024-12-11T20:53:18,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742351_1527 (size=136454) 2024-12-11T20:53:18,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742351_1527 (size=136454) 2024-12-11T20:53:18,231 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742351_1527 (size=136454) 2024-12-11T20:53:18,234 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T20:53:18,237 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T20:53:18,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-11T20:53:18,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-11T20:53:18,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742352_1528 (size=441) 2024-12-11T20:53:18,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742352_1528 (size=441) 2024-12-11T20:53:18,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742352_1528 (size=441) 2024-12-11T20:53:18,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742353_1529 (size=21) 2024-12-11T20:53:18,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742353_1529 (size=21) 2024-12-11T20:53:18,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742353_1529 (size=21) 2024-12-11T20:53:19,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742354_1530 (size=303996) 2024-12-11T20:53:19,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742354_1530 (size=303996) 2024-12-11T20:53:19,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742354_1530 (size=303996) 2024-12-11T20:53:19,218 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:53:19,218 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:53:19,516 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:48756 2024-12-11T20:53:25,472 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 58264c5b234b4bd11beed47f41492f9c, had cached 0 bytes from a total of 8188 2024-12-11T20:53:25,473 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 08787acc4c8783f43f2b29bbed40a0de, had cached 0 bytes from a total of 5422 2024-12-11T20:53:30,855 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:49184 2024-12-11T20:53:31,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742355_1531 (size=349694) 2024-12-11T20:53:31,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742355_1531 (size=349694) 2024-12-11T20:53:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742355_1531 (size=349694) 2024-12-11T20:53:31,790 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:53:33,156 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:53774 2024-12-11T20:53:33,157 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:53206 2024-12-11T20:53:39,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742356_1532 (size=8258) 2024-12-11T20:53:39,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742356_1532 (size=8258) 2024-12-11T20:53:39,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742356_1532 (size=8258) 2024-12-11T20:53:39,474 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000002/launch_container.sh] 2024-12-11T20:53:39,474 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000002/container_tokens] 2024-12-11T20:53:39,474 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000002/sysfs] 2024-12-11T20:53:41,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742358_1534 (size=5356) 2024-12-11T20:53:41,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742358_1534 (size=5356) 2024-12-11T20:53:41,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742358_1534 (size=5356) 2024-12-11T20:53:41,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742357_1533 (size=22154) 2024-12-11T20:53:41,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742357_1533 (size=22154) 2024-12-11T20:53:41,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742357_1533 (size=22154) 2024-12-11T20:53:41,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742359_1535 (size=463) 2024-12-11T20:53:41,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742359_1535 (size=463) 2024-12-11T20:53:41,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742359_1535 (size=463) 2024-12-11T20:53:42,052 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000003/launch_container.sh] 2024-12-11T20:53:42,052 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000003/container_tokens] 2024-12-11T20:53:42,052 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000003/sysfs] 2024-12-11T20:53:42,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742360_1536 (size=22154) 2024-12-11T20:53:42,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742360_1536 (size=22154) 2024-12-11T20:53:42,058 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742360_1536 (size=22154) 2024-12-11T20:53:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742361_1537 (size=349694) 2024-12-11T20:53:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742361_1537 (size=349694) 2024-12-11T20:53:42,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742361_1537 (size=349694) 2024-12-11T20:53:42,498 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:35572 2024-12-11T20:53:44,603 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:53:44,632 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:53:44,663 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-11T20:53:44,664 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:53:44,669 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:53:44,670 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T20:53:44,670 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T20:53:44,671 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T20:53:44,671 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T20:53:44,675 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T20:53:44,675 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950393057/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T20:53:44,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): 
Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-11T20:53:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:44,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T20:53:44,712 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950424711"}]},"ts":"1733950424711"} 2024-12-11T20:53:44,717 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-11T20:53:44,717 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-11T20:53:44,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-11T20:53:44,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, UNASSIGN}, {pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, UNASSIGN}] 2024-12-11T20:53:44,725 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, UNASSIGN 2024-12-11T20:53:44,726 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, UNASSIGN 2024-12-11T20:53:44,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=85706d21c97dbe3e5e92bf8ae7406a07, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:53:44,727 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=b5b4959b71b91bebe4a19decdaac194a, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:53:44,731 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=5be53b084ac7,39865,1733949995112, table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-11T20:53:44,736 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=5be53b084ac7,40781,1733949995244, table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a. 
It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-11T20:53:44,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, UNASSIGN because future has completed 2024-12-11T20:53:44,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, UNASSIGN because future has completed 2024-12-11T20:53:44,741 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:53:44,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:53:44,744 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:53:44,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:53:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T20:53:44,911 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:53:44,911 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:53:44,912 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing b5b4959b71b91bebe4a19decdaac194a, disabling compactions & flushes 2024-12-11T20:53:44,912 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:53:44,912 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 2024-12-11T20:53:44,912 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. after waiting 0 ms 2024-12-11T20:53:44,912 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 
2024-12-11T20:53:44,925 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(122): Close 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:53:44,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:53:44,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1722): Closing 85706d21c97dbe3e5e92bf8ae7406a07, disabling compactions & flushes 2024-12-11T20:53:44,926 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:53:44,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:53:44,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. after waiting 0 ms 2024-12-11T20:53:44,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 2024-12-11T20:53:44,955 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:53:44,956 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:53:44,957 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a. 
2024-12-11T20:53:44,957 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for b5b4959b71b91bebe4a19decdaac194a: Waiting for close lock at 1733950424912Running coprocessor pre-close hooks at 1733950424912Disabling compacts and flushes for region at 1733950424912Disabling writes for close at 1733950424912Writing region close event to WAL at 1733950424947 (+35 ms)Running coprocessor post-close hooks at 1733950424956 (+9 ms)Closed at 1733950424956 2024-12-11T20:53:44,962 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=b5b4959b71b91bebe4a19decdaac194a, regionState=CLOSED 2024-12-11T20:53:44,965 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:53:44,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:53:44,969 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:53:44,970 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:53:44,970 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07. 
2024-12-11T20:53:44,970 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1676): Region close journal for 85706d21c97dbe3e5e92bf8ae7406a07: Waiting for close lock at 1733950424926Running coprocessor pre-close hooks at 1733950424926Disabling compacts and flushes for region at 1733950424926Disabling writes for close at 1733950424926Writing region close event to WAL at 1733950424949 (+23 ms)Running coprocessor post-close hooks at 1733950424970 (+21 ms)Closed at 1733950424970 2024-12-11T20:53:44,973 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(157): Closed 85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:53:44,974 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=85706d21c97dbe3e5e92bf8ae7406a07, regionState=CLOSED 2024-12-11T20:53:44,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:53:44,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-11T20:53:44,989 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure b5b4959b71b91bebe4a19decdaac194a, server=5be53b084ac7,40781,1733949995244 in 237 msec 2024-12-11T20:53:44,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=b5b4959b71b91bebe4a19decdaac194a, UNASSIGN in 264 msec 2024-12-11T20:53:44,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=226 2024-12-11T20:53:44,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=226, state=SUCCESS, hasLock=false; CloseRegionProcedure 85706d21c97dbe3e5e92bf8ae7406a07, server=5be53b084ac7,39865,1733949995112 in 242 msec 2024-12-11T20:53:44,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=225 2024-12-11T20:53:44,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=85706d21c97dbe3e5e92bf8ae7406a07, UNASSIGN in 268 msec 2024-12-11T20:53:44,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-11T20:53:44,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 279 msec 2024-12-11T20:53:45,001 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950425001"}]},"ts":"1733950425001"} 2024-12-11T20:53:45,010 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-11T20:53:45,010 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 
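[Editor's note, not part of the captured log] The disable/delete procedures (pid=224, pid=230) and the snapshot deletions recorded around this point are driven by ordinary HBase client Admin calls. The following is a minimal, illustrative sketch of that client side, assuming a reachable cluster and an hbase-site.xml on the classpath; the class name and connection handling are assumptions, while the table and snapshot names are taken from the log itself.

```java
// Hedged sketch only: the Admin calls that would trigger the DisableTableProcedure,
// DeleteTableProcedure and snapshot deletions seen in this log section.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // corresponds to DisableTableProcedure (pid=224 above)
      }
      admin.deleteTable(table);      // corresponds to DeleteTableProcedure (pid=230 below)
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}
```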
2024-12-11T20:53:45,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 313 msec 2024-12-11T20:53:45,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T20:53:45,033 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T20:53:45,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-11T20:53:45,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,049 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-11T20:53:45,051 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=230, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-11T20:53:45,063 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:53:45,069 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:53:45,073 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/recovered.edits] 2024-12-11T20:53:45,074 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/recovered.edits] 2024-12-11T20:53:45,084 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/cf/3b6247262ece4e749b0a7bd171a69454 2024-12-11T20:53:45,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,099 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/cf/359d2fa4b52b46668f145ebe04a870f8 2024-12-11T20:53:45,099 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a/recovered.edits/9.seqid 2024-12-11T20:53:45,100 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/b5b4959b71b91bebe4a19decdaac194a 2024-12-11T20:53:45,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T20:53:45,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T20:53:45,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T20:53:45,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-12-11T20:53:45,110 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-11T20:53:45,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:45,110 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-11T20:53:45,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:45,113 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:45,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T20:53:45,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:45,113 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:45,115 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07/recovered.edits/9.seqid 2024-12-11T20:53:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-11T20:53:45,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:45,116 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:45,119 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportWithChecksum/85706d21c97dbe3e5e92bf8ae7406a07 2024-12-11T20:53:45,123 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-11T20:53:45,129 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=230, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,134 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-11T20:53:45,138 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-11T20:53:45,140 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=230, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,140 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-11T20:53:45,140 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950425140"}]},"ts":"9223372036854775807"} 2024-12-11T20:53:45,140 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950425140"}]},"ts":"9223372036854775807"} 2024-12-11T20:53:45,144 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:53:45,144 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 85706d21c97dbe3e5e92bf8ae7406a07, NAME => 'testtb-testExportWithChecksum,,1733950341444.85706d21c97dbe3e5e92bf8ae7406a07.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b5b4959b71b91bebe4a19decdaac194a, NAME => 'testtb-testExportWithChecksum,1,1733950341444.b5b4959b71b91bebe4a19decdaac194a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:53:45,144 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-12-11T20:53:45,144 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950425144"}]},"ts":"9223372036854775807"} 2024-12-11T20:53:45,147 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-11T20:53:45,150 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=230, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T20:53:45,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 117 msec 2024-12-11T20:53:45,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-11T20:53:45,226 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-11T20:53:45,226 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T20:53:45,237 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T20:53:45,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-11T20:53:45,245 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T20:53:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-11T20:53:45,289 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=811 (was 814), OpenFileDescriptor=797 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1396 (was 834) - SystemLoadAverage LEAK? -, ProcessCount=33 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=1074 (was 1202) 2024-12-11T20:53:45,289 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-11T20:53:45,335 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=811, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=1396, ProcessCount=31, AvailableMemoryMB=1069 2024-12-11T20:53:45,336 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-11T20:53:45,341 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T20:53:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:45,356 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T20:53:45,356 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:53:45,356 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 231 2024-12-11T20:53:45,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-11T20:53:45,368 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T20:53:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-11T20:53:45,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742362_1538 (size=418) 2024-12-11T20:53:45,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742362_1538 (size=418) 2024-12-11T20:53:45,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742362_1538 (size=418) 2024-12-11T20:53:45,539 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e2ff4c9ce824780c6b1399c9899ab461, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.', STARTKEY => '', 
ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:45,549 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3a20a5de6b946146e26495f720f58e54, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:45,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-11T20:53:45,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742364_1540 (size=79) 2024-12-11T20:53:45,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742364_1540 (size=79) 2024-12-11T20:53:45,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742364_1540 (size=79) 2024-12-11T20:53:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742363_1539 (size=79) 2024-12-11T20:53:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742363_1539 (size=79) 2024-12-11T20:53:45,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742363_1539 (size=79) 2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing e2ff4c9ce824780c6b1399c9899ab461, disabling compactions & flushes 2024-12-11T20:53:45,696 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 
2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. after waiting 0 ms 2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:45,696 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:45,696 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for e2ff4c9ce824780c6b1399c9899ab461: Waiting for close lock at 1733950425696Disabling compacts and flushes for region at 1733950425696Disabling writes for close at 1733950425696Writing region close event to WAL at 1733950425696Closed at 1733950425696 2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 3a20a5de6b946146e26495f720f58e54, disabling compactions & flushes 2024-12-11T20:53:45,700 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. after waiting 0 ms 2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:45,700 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
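The CreateTableProcedure above (pid=231) is the server-side half of an ordinary client createTable call: one 'cf' column family with VERSIONS => 1, BLOOMFILTER => ROW and a 64 KB block size, plus a single split key '1', which is why the RegionOpenAndInit pool instantiates exactly the two regions ['', '1') and ['1', ''). The snippet below is only an illustrative reconstruction of such a request using the public Admin API; the class name, connection setup and variable names are assumptions of this sketch, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSkipTmpTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // One 'cf' family: VERSIONS => 1, BLOOMFILTER => ROW, BLOCKSIZE => 64 KB,
          // mirroring the descriptor printed by HMaster in the log above.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(64 * 1024)
                  .build())
              .build();
          // A single split key '1' yields the two regions seen in the log:
          // ['', '1') and ['1', '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }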
2024-12-11T20:53:45,700 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3a20a5de6b946146e26495f720f58e54: Waiting for close lock at 1733950425700Disabling compacts and flushes for region at 1733950425700Disabling writes for close at 1733950425700Writing region close event to WAL at 1733950425700Closed at 1733950425700 2024-12-11T20:53:45,702 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T20:53:45,702 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733950425702"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950425702"}]},"ts":"1733950425702"} 2024-12-11T20:53:45,703 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733950425702"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733950425702"}]},"ts":"1733950425702"} 2024-12-11T20:53:45,708 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T20:53:45,713 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T20:53:45,713 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950425713"}]},"ts":"1733950425713"} 2024-12-11T20:53:45,717 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-11T20:53:45,718 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {5be53b084ac7=0} racks are {/default-rack=0} 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T20:53:45,719 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T20:53:45,719 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T20:53:45,719 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T20:53:45,720 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T20:53:45,720 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T20:53:45,720 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, 
ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, ASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, ASSIGN}] 2024-12-11T20:53:45,722 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, ASSIGN 2024-12-11T20:53:45,724 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, ASSIGN 2024-12-11T20:53:45,724 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, ASSIGN; state=OFFLINE, location=5be53b084ac7,40781,1733949995244; forceNewPlan=false, retain=false 2024-12-11T20:53:45,725 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, ASSIGN; state=OFFLINE, location=5be53b084ac7,39865,1733949995112; forceNewPlan=false, retain=false 2024-12-11T20:53:45,875 INFO [5be53b084ac7:41779 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T20:53:45,875 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=3a20a5de6b946146e26495f720f58e54, regionState=OPENING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:53:45,876 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=e2ff4c9ce824780c6b1399c9899ab461, regionState=OPENING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:53:45,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, ASSIGN because future has completed 2024-12-11T20:53:45,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, ASSIGN because future has completed 2024-12-11T20:53:45,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:53:45,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:53:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-11T20:53:46,079 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:46,079 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7752): Opening region: {ENCODED => 3a20a5de6b946146e26495f720f58e54, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T20:53:46,080 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. service=AccessControlService 2024-12-11T20:53:46,080 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:53:46,080 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,080 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:53:46,080 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7794): checking encryption for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,081 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7797): checking classloading for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,081 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:46,081 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7752): Opening region: {ENCODED => e2ff4c9ce824780c6b1399c9899ab461, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T20:53:46,081 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. service=AccessControlService 2024-12-11T20:53:46,081 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T20:53:46,082 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,082 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T20:53:46,082 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7794): checking encryption for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,082 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7797): checking classloading for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,100 INFO [StoreOpener-e2ff4c9ce824780c6b1399c9899ab461-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,112 INFO [StoreOpener-3a20a5de6b946146e26495f720f58e54-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,114 INFO [StoreOpener-3a20a5de6b946146e26495f720f58e54-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a20a5de6b946146e26495f720f58e54 columnFamilyName cf 2024-12-11T20:53:46,115 INFO [StoreOpener-e2ff4c9ce824780c6b1399c9899ab461-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2ff4c9ce824780c6b1399c9899ab461 columnFamilyName cf 2024-12-11T20:53:46,115 DEBUG [StoreOpener-e2ff4c9ce824780c6b1399c9899ab461-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:53:46,115 DEBUG [StoreOpener-3a20a5de6b946146e26495f720f58e54-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T20:53:46,116 INFO [StoreOpener-e2ff4c9ce824780c6b1399c9899ab461-1 {}] regionserver.HStore(327): Store=e2ff4c9ce824780c6b1399c9899ab461/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:53:46,116 INFO [StoreOpener-3a20a5de6b946146e26495f720f58e54-1 {}] regionserver.HStore(327): Store=3a20a5de6b946146e26495f720f58e54/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T20:53:46,116 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1038): replaying wal for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,116 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1038): replaying wal for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1048): stopping wal replay for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1048): stopping wal replay for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1060): Cleaning up temporary data for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,117 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1060): Cleaning up temporary data for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,119 
DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1093): writing seq id for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,124 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1093): writing seq id for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,128 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:53:46,128 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1114): Opened e2ff4c9ce824780c6b1399c9899ab461; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59130799, jitterRate=-0.1188824325799942}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:53:46,128 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,129 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1006): Region open journal for e2ff4c9ce824780c6b1399c9899ab461: Running coprocessor pre-open hook at 1733950426082Writing region info on filesystem at 1733950426082Initializing all the Stores at 1733950426083 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950426083Cleaning up temporary data from old regions at 1733950426117 (+34 ms)Running coprocessor post-open hooks at 1733950426128 (+11 ms)Region opened successfully at 1733950426129 (+1 ms) 2024-12-11T20:53:46,135 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461., pid=235, masterSystemTime=1733950426075 2024-12-11T20:53:46,138 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T20:53:46,139 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1114): Opened 3a20a5de6b946146e26495f720f58e54; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64082881, jitterRate=-0.04509066045284271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T20:53:46,139 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,139 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1006): Region open journal for 3a20a5de6b946146e26495f720f58e54: Running coprocessor pre-open hook at 1733950426081Writing region info on filesystem at 1733950426081Initializing all the Stores at 1733950426084 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733950426084Cleaning up temporary data from old regions at 1733950426117 (+33 ms)Running coprocessor post-open hooks at 1733950426139 (+22 ms)Region opened successfully at 1733950426139 2024-12-11T20:53:46,141 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=e2ff4c9ce824780c6b1399c9899ab461, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:53:46,141 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:46,141 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:46,143 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., pid=234, masterSystemTime=1733950426075 2024-12-11T20:53:46,145 DEBUG [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:46,145 INFO [RS_OPEN_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
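Both regions now report OPEN on their assigned regionservers (39865 and 40781). The test harness later verifies this through HBaseTestingUtil; outside the harness a rough equivalent check can be made from any client with a RegionLocator. A minimal sketch under the assumption of a cluster reachable through the default client configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintRegionLocations {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          // Each location pairs an encoded region name with the server it was assigned to,
          // e.g. e2ff4c9ce824780c6b1399c9899ab461 -> 5be53b084ac7,39865,1733949995112.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }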
2024-12-11T20:53:46,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=3a20a5de6b946146e26495f720f58e54, regionState=OPEN, openSeqNum=2, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:53:46,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:53:46,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:53:46,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-11T20:53:46,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-11T20:53:46,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; OpenRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112 in 246 msec 2024-12-11T20:53:46,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; OpenRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244 in 255 msec 2024-12-11T20:53:46,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, ASSIGN in 441 msec 2024-12-11T20:53:46,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-12-11T20:53:46,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, ASSIGN in 441 msec 2024-12-11T20:53:46,169 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T20:53:46,170 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950426169"}]},"ts":"1733950426169"} 2024-12-11T20:53:46,172 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-11T20:53:46,173 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T20:53:46,173 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-11T20:53:46,186 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T20:53:46,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:46,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:46,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:46,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 875 msec 2024-12-11T20:53:46,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T20:53:46,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-11T20:53:46,506 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T20:53:46,506 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-11T20:53:46,506 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:53:46,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40781 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32844 bytes) of info 2024-12-11T20:53:46,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-11T20:53:46,531 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:53:46,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-11T20:53:46,532 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T20:53:46,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T20:53:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950426536 (current time:1733950426536). 
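The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }) is what the Admin API emits for a flush-type snapshot. A minimal client-side sketch of an equivalent call (connection setup and class name are assumptions of this sketch, not taken from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot of the (still empty) table; the call blocks until the
          // master-side SnapshotProcedure completes, which is what the repeated
          // "Checking to see if procedure is done" polling in the log corresponds to.
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH));
        }
      }
    }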
2024-12-11T20:53:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:53:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T20:53:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:53:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e439e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:53:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:53:46,556 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:53:46,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:53:46,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:53:46,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f037759, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:53:46,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:53:46,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,559 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:53:46,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d739a59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:53:46,562 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:53:46,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:46,564 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33746, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:53:46,566 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:53:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:53:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,566 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:53:46,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cbf3192, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:53:46,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:53:46,573 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:53:46,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:53:46,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:53:46,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f8532ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:53:46,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:53:46,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,580 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:53:46,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71498f6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:46,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:53:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:53:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:46,586 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:53:46,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:53:46,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:46,590 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:53:46,595 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:53:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:53:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:46,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T20:53:46,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:53:46,599 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:53:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T20:53:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-11T20:53:46,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:46,627 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:53:46,639 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:53:46,657 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:53:46,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:46,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742365_1541 (size=203) 2024-12-11T20:53:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742365_1541 (size=203) 2024-12-11T20:53:46,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44295 is added to blk_1073742365_1541 (size=203) 2024-12-11T20:53:46,771 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:53:46,771 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461}, {pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54}] 2024-12-11T20:53:46,779 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:46,780 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:46,917 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-11T20:53:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:46,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=238 2024-12-11T20:53:46,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=237 2024-12-11T20:53:46,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:46,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.HRegion(2603): Flush status journal for e2ff4c9ce824780c6b1399c9899ab461: 2024-12-11T20:53:46,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T20:53:46,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:46,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:53:46,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.HRegion(2603): Flush status journal for 3a20a5de6b946146e26495f720f58e54: 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:53:46,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T20:53:47,012 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e2ff4c9ce824780c6b1399c9899ab461 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:53:47,012 DEBUG [master/5be53b084ac7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3a20a5de6b946146e26495f720f58e54 changed from -1.0 to 0.0, refreshing cache 2024-12-11T20:53:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742366_1542 (size=82) 2024-12-11T20:53:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742366_1542 (size=82) 2024-12-11T20:53:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742366_1542 (size=82) 2024-12-11T20:53:47,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 
2024-12-11T20:53:47,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=237 2024-12-11T20:53:47,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=237 2024-12-11T20:53:47,177 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:47,177 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:47,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 in 408 msec 2024-12-11T20:53:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742367_1543 (size=82) 2024-12-11T20:53:47,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742367_1543 (size=82) 2024-12-11T20:53:47,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742367_1543 (size=82) 2024-12-11T20:53:47,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:47,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
2024-12-11T20:53:47,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=238 2024-12-11T20:53:47,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=238 2024-12-11T20:53:47,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:47,634 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:47,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-12-11T20:53:47,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 in 864 msec 2024-12-11T20:53:47,638 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:53:47,653 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:53:47,655 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:53:47,655 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:47,657 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:47,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:47,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742368_1544 (size=585) 2024-12-11T20:53:47,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742368_1544 (size=585) 2024-12-11T20:53:47,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742368_1544 (size=585) 2024-12-11T20:53:47,908 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:53:47,932 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:53:47,933 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:47,936 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:53:47,936 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-11T20:53:47,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 1.3380 sec 2024-12-11T20:53:48,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T20:53:48,770 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T20:53:48,775 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='07f1bfa72dbd456de6bf5b98866ce87a6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:53:48,781 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1748ce1c72ba84a2afd84948055acf04d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,782 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='212337034cee0eded71b65833bcdd3ad3', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,782 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='3af77ef0cae4b6cc10bc396c48b11df0a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39865 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:53:48,795 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='57b864d99719c8172aebb76efa63c0ed8', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,796 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4a0fc6aab1e7c6cb8ab3b81dce17ffa7d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,797 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='6cd08f9600bc05b18a8521b0f1b2b6b74', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40781 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T20:53:48,798 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='cf9f0ba2d8fc8c6fcfc84b6675d47676', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54., hostname=5be53b084ac7,40781,1733949995244, seqNum=2] 2024-12-11T20:53:48,800 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T20:53:48,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:48,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 
2024-12-11T20:53:48,804 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T20:53:48,806 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T20:53:48,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0010_000001 (auth:SIMPLE) from 127.0.0.1:40922 2024-12-11T20:53:48,818 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T20:53:48,842 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T20:53:48,846 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000001/launch_container.sh] 2024-12-11T20:53:48,846 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000001/container_tokens] 2024-12-11T20:53:48,846 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_3/usercache/jenkins/appcache/application_1733950008312_0010/container_1733950008312_0010_01_000001/sysfs] 2024-12-11T20:53:48,854 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T20:53:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733950428854 (current time:1733950428854). 
2024-12-11T20:53:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T20:53:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T20:53:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T20:53:48,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25bcd8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:53:48,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:53:48,863 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:53:48,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:53:48,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:53:48,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2845d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:53:48,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:53:48,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,865 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:53:48,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47833ec9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:53:48,871 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:53:48,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:48,873 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:53:48,874 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:53:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:53:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,874 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T20:53:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324f5892, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ClusterIdFetcher(90): Going to request 5be53b084ac7,41779,-1 for getting cluster id 2024-12-11T20:53:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T20:53:48,879 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '70de47e7-5307-4e5f-941e-8ba89cc45c97' 2024-12-11T20:53:48,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T20:53:48,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "70de47e7-5307-4e5f-941e-8ba89cc45c97" 2024-12-11T20:53:48,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37eb6a78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5be53b084ac7,41779,-1] 2024-12-11T20:53:48,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T20:53:48,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,883 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57512, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T20:53:48,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce0c470, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T20:53:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T20:53:48,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5be53b084ac7,40781,1733949995244, seqNum=-1] 2024-12-11T20:53:48,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:48,887 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T20:53:48,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., hostname=5be53b084ac7,39865,1733949995112, seqNum=2] 2024-12-11T20:53:48,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T20:53:48,891 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T20:53:48,893 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779. 2024-12-11T20:53:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T20:53:48,893 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:53:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T20:53:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T20:53:48,894 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:53:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T20:53:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-11T20:53:48,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T20:53:48,897 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T20:53:48,900 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T20:53:48,906 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T20:53:49,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T20:53:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742369_1545 (size=198) 2024-12-11T20:53:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742369_1545 (size=198) 2024-12-11T20:53:49,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742369_1545 (size=198) 2024-12-11T20:53:49,057 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T20:53:49,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54}] 2024-12-11T20:53:49,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:49,059 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:49,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39865 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-11T20:53:49,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40781 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-11T20:53:49,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:53:49,212 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2902): Flushing e2ff4c9ce824780c6b1399c9899ab461 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T20:53:49,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
2024-12-11T20:53:49,213 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2902): Flushing 3a20a5de6b946146e26495f720f58e54 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T20:53:49,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T20:53:49,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/.tmp/cf/6724669562f54775a50d700fe56c004b is 71, key is 10e9c10caf131a1050076b6f44dfd854/cf:q/1733950428797/Put/seqid=0 2024-12-11T20:53:49,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/.tmp/cf/6ca929e5f3204b2b9abd47cfb037b44f is 71, key is 003a22edf77c48b4162c6a890e34c0fa/cf:q/1733950428791/Put/seqid=0 2024-12-11T20:53:49,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742370_1546 (size=8256) 2024-12-11T20:53:49,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742370_1546 (size=8256) 2024-12-11T20:53:49,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742370_1546 (size=8256) 2024-12-11T20:53:49,406 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/.tmp/cf/6724669562f54775a50d700fe56c004b 2024-12-11T20:53:49,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742371_1547 (size=5354) 2024-12-11T20:53:49,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742371_1547 (size=5354) 2024-12-11T20:53:49,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742371_1547 (size=5354) 2024-12-11T20:53:49,432 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/.tmp/cf/6ca929e5f3204b2b9abd47cfb037b44f 2024-12-11T20:53:49,439 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/.tmp/cf/6ca929e5f3204b2b9abd47cfb037b44f as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f 2024-12-11T20:53:49,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f, entries=4, sequenceid=6, filesize=5.2 K 2024-12-11T20:53:49,454 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for e2ff4c9ce824780c6b1399c9899ab461 in 242ms, sequenceid=6, compaction requested=false 2024-12-11T20:53:49,454 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for e2ff4c9ce824780c6b1399c9899ab461: 2024-12-11T20:53:49,454 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T20:53:49,454 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,454 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:53:49,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/.tmp/cf/6724669562f54775a50d700fe56c004b as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b 2024-12-11T20:53:49,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f] hfiles 2024-12-11T20:53:49,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b, entries=46, sequenceid=6, filesize=8.1 K 2024-12-11T20:53:49,463 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3a20a5de6b946146e26495f720f58e54 in 249ms, sequenceid=6, compaction requested=false 2024-12-11T20:53:49,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 3a20a5de6b946146e26495f720f58e54: 2024-12-11T20:53:49,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T20:53:49,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T20:53:49,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b] hfiles 2024-12-11T20:53:49,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T20:53:49,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742372_1548 (size=121) 2024-12-11T20:53:49,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742372_1548 (size=121) 2024-12-11T20:53:49,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742372_1548 (size=121) 2024-12-11T20:53:49,583 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 
2024-12-11T20:53:49,583 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-11T20:53:49,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-11T20:53:49,584 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:49,584 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:53:49,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2ff4c9ce824780c6b1399c9899ab461 in 528 msec 2024-12-11T20:53:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742373_1549 (size=121) 2024-12-11T20:53:49,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742373_1549 (size=121) 2024-12-11T20:53:49,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742373_1549 (size=121) 2024-12-11T20:53:49,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
2024-12-11T20:53:49,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5be53b084ac7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-11T20:53:49,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-11T20:53:49,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:49,610 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:53:49,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-11T20:53:49,614 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T20:53:49,615 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T20:53:49,616 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T20:53:49,616 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a20a5de6b946146e26495f720f58e54 in 554 msec 2024-12-11T20:53:49,617 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742374_1550 (size=663) 2024-12-11T20:53:49,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742374_1550 (size=663) 2024-12-11T20:53:49,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742374_1550 (size=663) 2024-12-11T20:53:49,771 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T20:53:49,794 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T20:53:49,795 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:49,808 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T20:53:49,808 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-11T20:53:49,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 914 msec 2024-12-11T20:53:50,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-11T20:53:50,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T20:53:50,042 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042 2024-12-11T20:53:50,042 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36717, tgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042, rawTgtDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042, srcFsUri=hdfs://localhost:36717, srcDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:50,153 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36717, inputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5 2024-12-11T20:53:50,153 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:50,160 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T20:53:50,199 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:50,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742375_1551 (size=198) 2024-12-11T20:53:50,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742375_1551 (size=198) 2024-12-11T20:53:50,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742375_1551 (size=198) 2024-12-11T20:53:50,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742376_1552 (size=663) 2024-12-11T20:53:50,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:50,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:50,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:50,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742376_1552 (size=663) 2024-12-11T20:53:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742376_1552 (size=663) 2024-12-11T20:53:50,496 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:53:52,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-12798351291513474439.jar 2024-12-11T20:53:52,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For 
class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop-5739398910899011043.jar 2024-12-11T20:53:52,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T20:53:52,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T20:53:52,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T20:53:52,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T20:53:52,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-12-11T20:53:52,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T20:53:52,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T20:53:52,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T20:53:52,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T20:53:52,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T20:53:52,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T20:53:52,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T20:53:52,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:52,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:52,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:52,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T20:53:52,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-11T20:53:52,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:52,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T20:53:52,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742377_1553 (size=131440) 2024-12-11T20:53:52,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742377_1553 (size=131440) 2024-12-11T20:53:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742377_1553 (size=131440) 2024-12-11T20:53:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742378_1554 (size=4188619) 2024-12-11T20:53:52,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742378_1554 (size=4188619) 2024-12-11T20:53:52,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742378_1554 (size=4188619) 2024-12-11T20:53:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742379_1555 (size=443172) 2024-12-11T20:53:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742379_1555 (size=443172) 2024-12-11T20:53:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742379_1555 (size=443172) 2024-12-11T20:53:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742380_1556 (size=1323991) 2024-12-11T20:53:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742380_1556 (size=1323991) 2024-12-11T20:53:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742380_1556 (size=1323991) 2024-12-11T20:53:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742381_1557 (size=903544) 2024-12-11T20:53:52,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742381_1557 (size=903544) 2024-12-11T20:53:52,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742381_1557 (size=903544) 2024-12-11T20:53:52,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742382_1558 (size=8360360) 2024-12-11T20:53:52,782 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742382_1558 (size=8360360) 2024-12-11T20:53:52,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742382_1558 (size=8360360) 2024-12-11T20:53:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742383_1559 (size=1877034) 2024-12-11T20:53:52,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742383_1559 (size=1877034) 2024-12-11T20:53:52,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742383_1559 (size=1877034) 2024-12-11T20:53:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742384_1560 (size=77835) 2024-12-11T20:53:52,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742384_1560 (size=77835) 2024-12-11T20:53:52,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742384_1560 (size=77835) 2024-12-11T20:53:52,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742385_1561 (size=30949) 2024-12-11T20:53:52,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742385_1561 (size=30949) 2024-12-11T20:53:52,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742385_1561 (size=30949) 2024-12-11T20:53:52,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742386_1562 (size=1597213) 2024-12-11T20:53:52,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742386_1562 (size=1597213) 2024-12-11T20:53:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742386_1562 (size=1597213) 2024-12-11T20:53:52,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742387_1563 (size=4695811) 2024-12-11T20:53:52,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742387_1563 (size=4695811) 2024-12-11T20:53:52,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742387_1563 (size=4695811) 2024-12-11T20:53:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742388_1564 (size=232957) 2024-12-11T20:53:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742388_1564 (size=232957) 2024-12-11T20:53:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742388_1564 (size=232957) 
2024-12-11T20:53:53,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742389_1565 (size=127628) 2024-12-11T20:53:53,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742389_1565 (size=127628) 2024-12-11T20:53:53,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742389_1565 (size=127628) 2024-12-11T20:53:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742390_1566 (size=20406) 2024-12-11T20:53:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742390_1566 (size=20406) 2024-12-11T20:53:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742390_1566 (size=20406) 2024-12-11T20:53:53,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742391_1567 (size=5175431) 2024-12-11T20:53:53,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742391_1567 (size=5175431) 2024-12-11T20:53:53,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742391_1567 (size=5175431) 2024-12-11T20:53:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742392_1568 (size=217634) 2024-12-11T20:53:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742392_1568 (size=217634) 2024-12-11T20:53:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742392_1568 (size=217634) 2024-12-11T20:53:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742393_1569 (size=1832290) 2024-12-11T20:53:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742393_1569 (size=1832290) 2024-12-11T20:53:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742393_1569 (size=1832290) 2024-12-11T20:53:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742394_1570 (size=322274) 2024-12-11T20:53:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742394_1570 (size=322274) 2024-12-11T20:53:53,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742394_1570 (size=322274) 2024-12-11T20:53:53,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742395_1571 (size=503880) 2024-12-11T20:53:53,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742395_1571 
(size=503880) 2024-12-11T20:53:53,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742395_1571 (size=503880) 2024-12-11T20:53:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742396_1572 (size=29229) 2024-12-11T20:53:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742396_1572 (size=29229) 2024-12-11T20:53:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742396_1572 (size=29229) 2024-12-11T20:53:53,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742397_1573 (size=24096) 2024-12-11T20:53:53,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742397_1573 (size=24096) 2024-12-11T20:53:53,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742397_1573 (size=24096) 2024-12-11T20:53:53,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742398_1574 (size=111872) 2024-12-11T20:53:53,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742398_1574 (size=111872) 2024-12-11T20:53:53,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742398_1574 (size=111872) 2024-12-11T20:53:53,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742399_1575 (size=6426228) 2024-12-11T20:53:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742399_1575 (size=6426228) 2024-12-11T20:53:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742399_1575 (size=6426228) 2024-12-11T20:53:53,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742400_1576 (size=45609) 2024-12-11T20:53:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742400_1576 (size=45609) 2024-12-11T20:53:53,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742400_1576 (size=45609) 2024-12-11T20:53:53,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742401_1577 (size=136454) 2024-12-11T20:53:53,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742401_1577 (size=136454) 2024-12-11T20:53:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742401_1577 (size=136454) 2024-12-11T20:53:53,168 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-12-11T20:53:53,171 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-11T20:53:53,173 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-11T20:53:53,173 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-11T20:53:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742402_1578 (size=469) 2024-12-11T20:53:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742402_1578 (size=469) 2024-12-11T20:53:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742402_1578 (size=469) 2024-12-11T20:53:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742403_1579 (size=21) 2024-12-11T20:53:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742403_1579 (size=21) 2024-12-11T20:53:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742403_1579 (size=21) 2024-12-11T20:53:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742404_1580 (size=304172) 2024-12-11T20:53:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742404_1580 (size=304172) 2024-12-11T20:53:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742404_1580 (size=304172) 2024-12-11T20:53:53,215 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T20:53:53,215 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T20:53:53,793 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:34346 2024-12-11T20:53:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:53:53,973 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-11T20:53:53,974 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T20:53:59,476 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:54:00,548 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:46650 2024-12-11T20:54:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742405_1581 (size=349894) 2024-12-11T20:54:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742405_1581 (size=349894) 2024-12-11T20:54:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742405_1581 (size=349894) 2024-12-11T20:54:01,790 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T20:54:02,831 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:55670 2024-12-11T20:54:02,836 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:41320 2024-12-11T20:54:07,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742406_1582 (size=8256) 2024-12-11T20:54:07,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742406_1582 (size=8256) 2024-12-11T20:54:07,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742406_1582 (size=8256) 2024-12-11T20:54:09,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742408_1584 (size=5354) 2024-12-11T20:54:09,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742408_1584 (size=5354) 2024-12-11T20:54:09,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742408_1584 (size=5354) 2024-12-11T20:54:10,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742407_1583 (size=22223) 2024-12-11T20:54:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742407_1583 (size=22223) 2024-12-11T20:54:10,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742407_1583 (size=22223) 2024-12-11T20:54:10,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742409_1585 (size=477) 2024-12-11T20:54:10,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742409_1585 (size=477) 2024-12-11T20:54:10,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742409_1585 (size=477) 2024-12-11T20:54:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742410_1586 (size=22223) 2024-12-11T20:54:10,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742410_1586 (size=22223) 2024-12-11T20:54:10,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742410_1586 (size=22223) 2024-12-11T20:54:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742411_1587 (size=349894) 2024-12-11T20:54:10,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742411_1587 (size=349894) 2024-12-11T20:54:10,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742411_1587 (size=349894) 
2024-12-11T20:54:10,328 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:41330 2024-12-11T20:54:10,473 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 58264c5b234b4bd11beed47f41492f9c, had cached 0 bytes from a total of 8188 2024-12-11T20:54:10,473 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 08787acc4c8783f43f2b29bbed40a0de, had cached 0 bytes from a total of 5422 2024-12-11T20:54:11,679 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T20:54:11,679 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T20:54:11,707 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:11,707 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T20:54:11,710 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T20:54:11,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:11,711 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T20:54:11,711 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T20:54:11,711 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_754086280_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:11,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T20:54:11,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/export-test/export-1733950430042/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T20:54:11,745 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:11,746 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:11,750 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950451749"}]},"ts":"1733950451749"} 2024-12-11T20:54:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T20:54:11,752 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-11T20:54:11,752 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-11T20:54:11,754 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-11T20:54:11,757 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, UNASSIGN}, {pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, UNASSIGN}] 2024-12-11T20:54:11,758 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, UNASSIGN 2024-12-11T20:54:11,759 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, UNASSIGN 2024-12-11T20:54:11,760 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=3a20a5de6b946146e26495f720f58e54, regionState=CLOSING, regionLocation=5be53b084ac7,40781,1733949995244 2024-12-11T20:54:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, UNASSIGN because future has completed 2024-12-11T20:54:11,764 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:54:11,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244}] 2024-12-11T20:54:11,766 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta 
row=e2ff4c9ce824780c6b1399c9899ab461, regionState=CLOSING, regionLocation=5be53b084ac7,39865,1733949995112 2024-12-11T20:54:11,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, UNASSIGN because future has completed 2024-12-11T20:54:11,773 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T20:54:11,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112}] 2024-12-11T20:54:11,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T20:54:11,921 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(122): Close 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:54:11,921 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:54:11,921 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1722): Closing 3a20a5de6b946146e26495f720f58e54, disabling compactions & flushes 2024-12-11T20:54:11,921 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:54:11,921 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 2024-12-11T20:54:11,921 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. after waiting 0 ms 2024-12-11T20:54:11,921 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
2024-12-11T20:54:11,926 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(122): Close e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:54:11,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T20:54:11,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1722): Closing e2ff4c9ce824780c6b1399c9899ab461, disabling compactions & flushes 2024-12-11T20:54:11,926 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:54:11,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:54:11,926 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. after waiting 0 ms 2024-12-11T20:54:11,927 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 2024-12-11T20:54:11,937 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:54:11,939 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:11,939 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461. 
2024-12-11T20:54:11,939 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1676): Region close journal for e2ff4c9ce824780c6b1399c9899ab461: Waiting for close lock at 1733950451926Running coprocessor pre-close hooks at 1733950451926Disabling compacts and flushes for region at 1733950451926Disabling writes for close at 1733950451926Writing region close event to WAL at 1733950451932 (+6 ms)Running coprocessor post-close hooks at 1733950451939 (+7 ms)Closed at 1733950451939 2024-12-11T20:54:11,944 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(157): Closed e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:54:11,944 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=e2ff4c9ce824780c6b1399c9899ab461, regionState=CLOSED 2024-12-11T20:54:11,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112 because future has completed 2024-12-11T20:54:11,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=244 2024-12-11T20:54:11,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=244, state=SUCCESS, hasLock=false; CloseRegionProcedure e2ff4c9ce824780c6b1399c9899ab461, server=5be53b084ac7,39865,1733949995112 in 175 msec 2024-12-11T20:54:11,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e2ff4c9ce824780c6b1399c9899ab461, UNASSIGN in 194 msec 2024-12-11T20:54:11,960 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T20:54:11,962 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:11,962 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54. 
2024-12-11T20:54:11,962 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1676): Region close journal for 3a20a5de6b946146e26495f720f58e54: Waiting for close lock at 1733950451921Running coprocessor pre-close hooks at 1733950451921Disabling compacts and flushes for region at 1733950451921Disabling writes for close at 1733950451921Writing region close event to WAL at 1733950451929 (+8 ms)Running coprocessor post-close hooks at 1733950451962 (+33 ms)Closed at 1733950451962 2024-12-11T20:54:11,964 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(157): Closed 3a20a5de6b946146e26495f720f58e54 2024-12-11T20:54:11,966 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=3a20a5de6b946146e26495f720f58e54, regionState=CLOSED 2024-12-11T20:54:11,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244 because future has completed 2024-12-11T20:54:11,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-11T20:54:11,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseRegionProcedure 3a20a5de6b946146e26495f720f58e54, server=5be53b084ac7,40781,1733949995244 in 207 msec 2024-12-11T20:54:11,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=245, resume processing ppid=243 2024-12-11T20:54:11,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3a20a5de6b946146e26495f720f58e54, UNASSIGN in 217 msec 2024-12-11T20:54:11,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-11T20:54:11,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 232 msec 2024-12-11T20:54:11,990 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733950451990"}]},"ts":"1733950451990"} 2024-12-11T20:54:11,992 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-11T20:54:11,992 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-11T20:54:11,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 248 msec 2024-12-11T20:54:12,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T20:54:12,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T20:54:12,073 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,077 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=248, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,080 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39865 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,081 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:54:12,081 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54 2024-12-11T20:54:12,083 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/recovered.edits] 2024-12-11T20:54:12,083 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf, FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/recovered.edits] 2024-12-11T20:54:12,113 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/cf/6724669562f54775a50d700fe56c004b 2024-12-11T20:54:12,113 DEBUG [HFileArchiver-22 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/cf/6ca929e5f3204b2b9abd47cfb037b44f 2024-12-11T20:54:12,117 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461/recovered.edits/9.seqid 2024-12-11T20:54:12,117 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/recovered.edits/9.seqid to hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54/recovered.edits/9.seqid 2024-12-11T20:54:12,118 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/e2ff4c9ce824780c6b1399c9899ab461 2024-12-11T20:54:12,118 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testtb-testExportFileSystemStateWithSkipTmp/3a20a5de6b946146e26495f720f58e54 2024-12-11T20:54:12,118 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-11T20:54:12,123 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=248, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,126 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-11T20:54:12,131 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-11T20:54:12,132 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=248, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,132 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-12-11T20:54:12,133 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950452132"}]},"ts":"9223372036854775807"} 2024-12-11T20:54:12,133 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733950452132"}]},"ts":"9223372036854775807"} 2024-12-11T20:54:12,135 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T20:54:12,135 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e2ff4c9ce824780c6b1399c9899ab461, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733950425341.e2ff4c9ce824780c6b1399c9899ab461.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3a20a5de6b946146e26495f720f58e54, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733950425341.3a20a5de6b946146e26495f720f58e54.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T20:54:12,135 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-11T20:54:12,136 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733950452135"}]},"ts":"9223372036854775807"} 2024-12-11T20:54:12,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,139 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-11T20:54:12,140 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=248, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 67 msec 2024-12-11T20:54:12,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T20:54:12,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T20:54:12,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T20:54:12,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T20:54:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-11T20:54:12,162 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,162 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T20:54:12,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:54:12,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:54:12,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:54:12,172 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T20:54:12,187 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T20:54:12,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,191 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T20:54:12,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:12,218 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=818 (was 811) Potentially hanging thread: Thread-8793 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:38381 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:33106 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38381 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-334154043_1 at /127.0.0.1:49644 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:40310 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_754086280_22 at /127.0.0.1:42384 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 144140) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=798 (was 797) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1291 (was 1396), ProcessCount=21 (was 31), AvailableMemoryMB=1346 (was 1069) - AvailableMemoryMB LEAK? 
- 2024-12-11T20:54:12,218 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-12-11T20:54:12,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-11T20:54:12,237 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3dcf889b{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T20:54:12,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21af667e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:54:12,241 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:54:12,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c2f3736{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T20:54:12,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30a89850{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,STOPPED} 2024-12-11T20:54:13,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000002/launch_container.sh] 2024-12-11T20:54:13,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000002/container_tokens] 2024-12-11T20:54:13,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_1/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000002/sysfs] 2024-12-11T20:54:13,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T20:54:15,358 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000003/launch_container.sh] 2024-12-11T20:54:15,358 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000003/container_tokens] 2024-12-11T20:54:15,358 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-1_3/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000003/sysfs] 2024-12-11T20:54:16,597 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733950008312_0011_000001 (auth:SIMPLE) from 127.0.0.1:57332 2024-12-11T20:54:16,641 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000001/launch_container.sh] 2024-12-11T20:54:16,641 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000001/container_tokens] 2024-12-11T20:54:16,641 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_332189238/yarn-380572990/MiniMRCluster_332189238-localDir-nm-0_0/usercache/jenkins/appcache/application_1733950008312_0011/container_1733950008312_0011_01_000001/sysfs] 2024-12-11T20:54:17,072 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-11T20:54:17,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=85.70 KB heapSize=135.56 KB 2024-12-11T20:54:17,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/info/915b3e5dda9b4172ada95589d3512b8d is 173, key is testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c./info:regioninfo/1733950315483/Put/seqid=0 2024-12-11T20:54:17,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742412_1588 (size=15646) 2024-12-11T20:54:17,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742412_1588 (size=15646) 2024-12-11T20:54:17,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742412_1588 (size=15646) 2024-12-11T20:54:17,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=72.69 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/info/915b3e5dda9b4172ada95589d3512b8d 2024-12-11T20:54:17,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/ns/16b2df99f73d453082bb67080bc202d7 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf./ns:/1733950312900/DeleteFamily/seqid=0 2024-12-11T20:54:17,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742413_1589 (size=8378) 2024-12-11T20:54:17,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742413_1589 (size=8378) 2024-12-11T20:54:17,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742413_1589 (size=8378) 2024-12-11T20:54:17,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/ns/16b2df99f73d453082bb67080bc202d7 2024-12-11T20:54:17,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/rep_barrier/2c20814edec74d6c871b6cc10d3c1176 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf./rep_barrier:/1733950312900/DeleteFamily/seqid=0 2024-12-11T20:54:17,527 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:54:17,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742414_1590 (size=8717) 2024-12-11T20:54:17,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46705 is added to blk_1073742414_1590 (size=8717) 2024-12-11T20:54:17,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742414_1590 (size=8717) 2024-12-11T20:54:17,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/rep_barrier/2c20814edec74d6c871b6cc10d3c1176 2024-12-11T20:54:17,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/table/bc350003d83b4215827911f8e8e31b90 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733950289582.b8b7662a0229049c109d58ebdbba07cf./table:/1733950312900/DeleteFamily/seqid=0 2024-12-11T20:54:17,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742415_1591 (size=9531) 2024-12-11T20:54:17,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742415_1591 (size=9531) 2024-12-11T20:54:17,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742415_1591 (size=9531) 2024-12-11T20:54:17,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/table/bc350003d83b4215827911f8e8e31b90 2024-12-11T20:54:17,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/info/915b3e5dda9b4172ada95589d3512b8d as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/info/915b3e5dda9b4172ada95589d3512b8d 2024-12-11T20:54:18,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/info/915b3e5dda9b4172ada95589d3512b8d, entries=84, sequenceid=236, filesize=15.3 K 2024-12-11T20:54:18,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/ns/16b2df99f73d453082bb67080bc202d7 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/ns/16b2df99f73d453082bb67080bc202d7 2024-12-11T20:54:18,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/ns/16b2df99f73d453082bb67080bc202d7, entries=28, sequenceid=236, filesize=8.2 K 2024-12-11T20:54:18,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/rep_barrier/2c20814edec74d6c871b6cc10d3c1176 as 
hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/rep_barrier/2c20814edec74d6c871b6cc10d3c1176 2024-12-11T20:54:18,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/rep_barrier/2c20814edec74d6c871b6cc10d3c1176, entries=26, sequenceid=236, filesize=8.5 K 2024-12-11T20:54:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/.tmp/table/bc350003d83b4215827911f8e8e31b90 as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/table/bc350003d83b4215827911f8e8e31b90 2024-12-11T20:54:18,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/table/bc350003d83b4215827911f8e8e31b90, entries=43, sequenceid=236, filesize=9.3 K 2024-12-11T20:54:18,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~85.70 KB/87755, heapSize ~135.50 KB/138752, currentSize=0 B/0 for 1588230740 in 1034ms, sequenceid=236, compaction requested=false 2024-12-11T20:54:18,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-11T20:54:29,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@174cbe06{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T20:54:29,309 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@52e9365b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:54:29,309 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:54:29,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71b0e2cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T20:54:29,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bfc944e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,STOPPED} 2024-12-11T20:54:31,791 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
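The flush recorded above is the periodic MemStoreFlusher writing all four column families of hbase:meta (region 1588230740) out to new HFiles and committing them from .tmp. The same flush can be requested explicitly through the Admin API; the following is only a hedged sketch with connection setup assumed, not anything taken from the test itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flushes the info, ns, rep_barrier and table families of hbase:meta,
      // mirroring the MemStoreFlusher run shown in the log above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}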
2024-12-11T20:54:46,339 ERROR [Thread[Thread-398,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T20:54:46,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62a90d5c{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T20:54:46,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2239a866{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:54:46,343 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:54:46,347 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bcfbfc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T20:54:46,347 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77cab988{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,STOPPED} 2024-12-11T20:54:46,365 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-11T20:54:46,389 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-11T20:54:46,389 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-11T20:54:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741830_1006 (size=1153958) 2024-12-11T20:54:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741830_1006 (size=1153958) 2024-12-11T20:54:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741830_1006 (size=1153958) 2024-12-11T20:54:46,405 ERROR [Thread[Thread-422,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T20:54:46,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e64743c{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T20:54:46,414 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a3501ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:54:46,414 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:54:46,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4fa11621{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T20:54:46,414 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@240d436c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,STOPPED} 2024-12-11T20:54:46,419 ERROR [Thread[Thread-380,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T20:54:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-11T20:54:46,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T20:54:46,419 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T20:54:46,419 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:54:46,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,420 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting 
down HBase Cluster 2024-12-11T20:54:46,420 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1945066928, stopped=false 2024-12-11T20:54:46,420 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,420 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T20:54:46,420 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5be53b084ac7,41779,1733949993169 2024-12-11T20:54:46,423 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T20:54:46,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:54:46,472 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:54:46,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:54:46,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:54:46,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:54:46,472 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
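The call stacks in this part of the log all lead back to TestExportSnapshot.tearDownAfterClass, which closes the shared connection and then shuts the mini MapReduce cluster and the mini HBase cluster down. A rough JUnit-style sketch of that teardown follows; the field name and class wrapper are assumptions, and the two shutdown methods are named after what the log reports, so this is an approximation of the real test code rather than a copy of it.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class TeardownSketch {
  // Assumed field name; the tests keep a shared HBaseTestingUtil instance.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniMapReduceCluster(); // "Stopping mini mapreduce cluster..." in the log
    TEST_UTIL.shutdownMiniCluster();          // closes the cluster connection, then stops the master and region servers
  }
}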
2024-12-11T20:54:46,473 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:54:46,473 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,473 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,40355,1733949994820' ***** 2024-12-11T20:54:46,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:54:46,473 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,473 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:54:46,473 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,39865,1733949995112' ***** 2024-12-11T20:54:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:54:46,473 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,473 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:54:46,474 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:54:46,474 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:54:46,474 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:54:46,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T20:54:46,474 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:54:46,474 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5be53b084ac7,40781,1733949995244' ***** 2024-12-11T20:54:46,474 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:54:46,474 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T20:54:46,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:54:46,474 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T20:54:46,474 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(3091): Received CLOSE for 58264c5b234b4bd11beed47f41492f9c 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(3091): Received CLOSE for 08787acc4c8783f43f2b29bbed40a0de 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(3091): Received CLOSE for 92982b2478254451b467ff12e9fe4706 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,39865,1733949995112 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,40781,1733949995244 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:54:46,475 INFO [RS:2;5be53b084ac7:40781 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;5be53b084ac7:40781. 2024-12-11T20:54:46,475 INFO [RS:1;5be53b084ac7:39865 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;5be53b084ac7:39865. 2024-12-11T20:54:46,476 DEBUG [RS:2;5be53b084ac7:40781 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:54:46,476 DEBUG [RS:1;5be53b084ac7:39865 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:54:46,476 DEBUG [RS:2;5be53b084ac7:40781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,476 DEBUG [RS:1;5be53b084ac7:39865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,476 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T20:54:46,476 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:54:46,476 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:54:46,476 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:54:46,476 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1325): Online Regions={58264c5b234b4bd11beed47f41492f9c=testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c., 92982b2478254451b467ff12e9fe4706=hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706.} 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 58264c5b234b4bd11beed47f41492f9c, disabling compactions & flushes 2024-12-11T20:54:46,476 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:54:46,476 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. after waiting 0 ms 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:54:46,476 DEBUG [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1351): Waiting on 58264c5b234b4bd11beed47f41492f9c, 92982b2478254451b467ff12e9fe4706 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 08787acc4c8783f43f2b29bbed40a0de, disabling compactions & flushes 2024-12-11T20:54:46,476 INFO [RS:0;5be53b084ac7:40355 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-11T20:54:46,476 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:54:46,476 INFO [RS:0;5be53b084ac7:40355 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:54:46,476 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(959): stopping server 5be53b084ac7,40355,1733949994820 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. after waiting 0 ms 2024-12-11T20:54:46,476 INFO [RS:0;5be53b084ac7:40355 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:54:46,476 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:54:46,476 INFO [RS:0;5be53b084ac7:40355 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5be53b084ac7:40355. 2024-12-11T20:54:46,477 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T20:54:46,477 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 08787acc4c8783f43f2b29bbed40a0de=testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de.} 2024-12-11T20:54:46,477 DEBUG [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1351): Waiting on 08787acc4c8783f43f2b29bbed40a0de, 1588230740 2024-12-11T20:54:46,476 DEBUG [RS:0;5be53b084ac7:40355 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:54:46,477 DEBUG [RS:0;5be53b084ac7:40355 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-11T20:54:46,477 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T20:54:46,477 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T20:54:46,477 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T20:54:46,477 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T20:54:46,477 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T20:54:46,477 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,40355,1733949994820; all regions closed. 2024-12-11T20:54:46,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:54:46,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:54:46,479 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T20:54:46,479 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,479 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,493 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/08787acc4c8783f43f2b29bbed40a0de/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:54:46,494 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,494 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 
2024-12-11T20:54:46,494 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 08787acc4c8783f43f2b29bbed40a0de: Waiting for close lock at 1733950486476Running coprocessor pre-close hooks at 1733950486476Disabling compacts and flushes for region at 1733950486476Disabling writes for close at 1733950486476Writing region close event to WAL at 1733950486484 (+8 ms)Running coprocessor post-close hooks at 1733950486493 (+9 ms)Closed at 1733950486494 (+1 ms) 2024-12-11T20:54:46,494 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733950315084.08787acc4c8783f43f2b29bbed40a0de. 2024-12-11T20:54:46,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741834_1010 (size=12222) 2024-12-11T20:54:46,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741834_1010 (size=12222) 2024-12-11T20:54:46,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741834_1010 (size=12222) 2024-12-11T20:54:46,507 DEBUG [RS:0;5be53b084ac7:40355 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs 2024-12-11T20:54:46,507 INFO [RS:0;5be53b084ac7:40355 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C40355%2C1733949994820:(num 1733950000352) 2024-12-11T20:54:46,507 DEBUG [RS:0;5be53b084ac7:40355 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,507 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,507 INFO [RS:0;5be53b084ac7:40355 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:54:46,508 INFO [RS:0;5be53b084ac7:40355 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40355 2024-12-11T20:54:46,511 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:54:46,516 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/meta/1588230740/recovered.edits/239.seqid, newMaxSeqId=239, maxSeqId=1 2024-12-11T20:54:46,517 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,517 INFO [regionserver/5be53b084ac7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,517 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:54:46,517 INFO [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T20:54:46,517 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733950486477Running coprocessor pre-close hooks at 1733950486477Disabling compacts and flushes for region at 1733950486477Disabling writes for close at 1733950486477Writing region close event to WAL at 1733950486503 (+26 ms)Running coprocessor post-close hooks at 1733950486517 (+14 ms)Closed at 1733950486517 2024-12-11T20:54:46,517 DEBUG [RS_CLOSE_META-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T20:54:46,519 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/default/testExportExpiredSnapshot/58264c5b234b4bd11beed47f41492f9c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T20:54:46,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,40355,1733949994820 2024-12-11T20:54:46,522 INFO [RS:0;5be53b084ac7:40355 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:54:46,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:54:46,523 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,40355,1733949994820] 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,535 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 
2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 58264c5b234b4bd11beed47f41492f9c: Waiting for close lock at 1733950486475Running coprocessor pre-close hooks at 1733950486476 (+1 ms)Disabling compacts and flushes for region at 1733950486476Disabling writes for close at 1733950486476Writing region close event to WAL at 1733950486496 (+20 ms)Running coprocessor post-close hooks at 1733950486535 (+39 ms)Closed at 1733950486535 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733950315084.58264c5b234b4bd11beed47f41492f9c. 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 92982b2478254451b467ff12e9fe4706, disabling compactions & flushes 2024-12-11T20:54:46,535 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. after waiting 0 ms 2024-12-11T20:54:46,535 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 
2024-12-11T20:54:46,535 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 92982b2478254451b467ff12e9fe4706 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-11T20:54:46,537 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,40355,1733949994820 already deleted, retry=false 2024-12-11T20:54:46,537 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,40355,1733949994820 expired; onlineServers=2 2024-12-11T20:54:46,565 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/.tmp/l/1f1694eb203c4d84a17a1960278c1b3b is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733950312859/DeleteFamily/seqid=0 2024-12-11T20:54:46,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742416_1592 (size=5860) 2024-12-11T20:54:46,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742416_1592 (size=5860) 2024-12-11T20:54:46,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742416_1592 (size=5860) 2024-12-11T20:54:46,578 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/.tmp/l/1f1694eb203c4d84a17a1960278c1b3b 2024-12-11T20:54:46,588 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1f1694eb203c4d84a17a1960278c1b3b 2024-12-11T20:54:46,589 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/.tmp/l/1f1694eb203c4d84a17a1960278c1b3b as hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/l/1f1694eb203c4d84a17a1960278c1b3b 2024-12-11T20:54:46,600 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1f1694eb203c4d84a17a1960278c1b3b 2024-12-11T20:54:46,600 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/l/1f1694eb203c4d84a17a1960278c1b3b, entries=14, sequenceid=31, filesize=5.7 K 2024-12-11T20:54:46,603 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 92982b2478254451b467ff12e9fe4706 in 68ms, sequenceid=31, 
compaction requested=false 2024-12-11T20:54:46,610 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/data/hbase/acl/92982b2478254451b467ff12e9fe4706/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-11T20:54:46,610 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:46,610 INFO [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:54:46,610 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 92982b2478254451b467ff12e9fe4706: Waiting for close lock at 1733950486535Running coprocessor pre-close hooks at 1733950486535Disabling compacts and flushes for region at 1733950486535Disabling writes for close at 1733950486535Obtaining lock to block concurrent updates at 1733950486535Preparing flush snapshotting stores in 92982b2478254451b467ff12e9fe4706 at 1733950486535Finished memstore snapshotting hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1733950486536 (+1 ms)Flushing stores of hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. at 1733950486539 (+3 ms)Flushing 92982b2478254451b467ff12e9fe4706/l: creating writer at 1733950486539Flushing 92982b2478254451b467ff12e9fe4706/l: appending metadata at 1733950486565 (+26 ms)Flushing 92982b2478254451b467ff12e9fe4706/l: closing flushed file at 1733950486565Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7372c7c2: reopening flushed file at 1733950486588 (+23 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 92982b2478254451b467ff12e9fe4706 in 68ms, sequenceid=31, compaction requested=false at 1733950486603 (+15 ms)Writing region close event to WAL at 1733950486607 (+4 ms)Running coprocessor post-close hooks at 1733950486610 (+3 ms)Closed at 1733950486610 2024-12-11T20:54:46,611 DEBUG [RS_CLOSE_REGION-regionserver/5be53b084ac7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733950002196.92982b2478254451b467ff12e9fe4706. 2024-12-11T20:54:46,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40355-0x10016aeea9b0001, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,676 INFO [RS:0;5be53b084ac7:40355 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:54:46,676 INFO [RS:0;5be53b084ac7:40355 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,40355,1733949994820; zookeeper connection closed. 
2024-12-11T20:54:46,676 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@42dd54a2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@42dd54a2 2024-12-11T20:54:46,676 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,39865,1733949995112; all regions closed. 2024-12-11T20:54:46,677 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(976): stopping server 5be53b084ac7,40781,1733949995244; all regions closed. 2024-12-11T20:54:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741835_1011 (size=17277) 2024-12-11T20:54:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741835_1011 (size=17277) 2024-12-11T20:54:46,684 DEBUG [RS:1;5be53b084ac7:39865 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs 2024-12-11T20:54:46,684 INFO [RS:1;5be53b084ac7:39865 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C39865%2C1733949995112:(num 1733950000355) 2024-12-11T20:54:46,684 DEBUG [RS:1;5be53b084ac7:39865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,684 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,684 INFO [RS:1;5be53b084ac7:39865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:54:46,684 INFO [RS:1;5be53b084ac7:39865 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T20:54:46,685 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T20:54:46,685 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T20:54:46,685 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T20:54:46,685 INFO [RS:1;5be53b084ac7:39865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:54:46,685 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:54:46,685 INFO [RS:1;5be53b084ac7:39865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39865 2024-12-11T20:54:46,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741836_1012 (size=99973) 2024-12-11T20:54:46,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741836_1012 (size=99973) 2024-12-11T20:54:46,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741836_1012 (size=99973) 2024-12-11T20:54:46,692 DEBUG [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs 2024-12-11T20:54:46,692 INFO [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C40781%2C1733949995244.meta:.meta(num 1733950001214) 2024-12-11T20:54:46,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073741833_1009 (size=13600) 2024-12-11T20:54:46,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073741833_1009 (size=13600) 2024-12-11T20:54:46,735 DEBUG [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/oldWALs 2024-12-11T20:54:46,735 INFO [RS:2;5be53b084ac7:40781 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5be53b084ac7%2C40781%2C1733949995244:(num 1733950000356) 2024-12-11T20:54:46,736 DEBUG [RS:2;5be53b084ac7:40781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T20:54:46,736 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T20:54:46,736 INFO [RS:2;5be53b084ac7:40781 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:54:46,736 INFO [RS:2;5be53b084ac7:40781 {}] hbase.ChoreService(370): Chore service for: regionserver/5be53b084ac7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T20:54:46,736 INFO [RS:2;5be53b084ac7:40781 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:54:46,736 INFO [regionserver/5be53b084ac7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T20:54:46,736 INFO [RS:2;5be53b084ac7:40781 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40781 2024-12-11T20:54:46,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,39865,1733949995112 2024-12-11T20:54:46,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T20:54:46,740 INFO [RS:1;5be53b084ac7:39865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:54:46,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5be53b084ac7,40781,1733949995244 2024-12-11T20:54:46,765 INFO [RS:2;5be53b084ac7:40781 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T20:54:46,811 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,39865,1733949995112] 2024-12-11T20:54:46,838 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,39865,1733949995112 already deleted, retry=false 2024-12-11T20:54:46,838 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,39865,1733949995112 expired; onlineServers=1 2024-12-11T20:54:46,838 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5be53b084ac7,40781,1733949995244] 2024-12-11T20:54:46,846 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/5be53b084ac7,40781,1733949995244 already deleted, retry=false 2024-12-11T20:54:46,847 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5be53b084ac7,40781,1733949995244 expired; onlineServers=0 2024-12-11T20:54:46,847 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5be53b084ac7,41779,1733949993169' ***** 2024-12-11T20:54:46,847 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T20:54:46,847 INFO [M:0;5be53b084ac7:41779 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T20:54:46,847 INFO [M:0;5be53b084ac7:41779 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T20:54:46,847 DEBUG [M:0;5be53b084ac7:41779 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T20:54:46,847 DEBUG [M:0;5be53b084ac7:41779 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T20:54:46,847 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-11T20:54:46,847 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733949999162 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.large.0-1733949999162,5,FailOnTimeoutGroup] 2024-12-11T20:54:46,847 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733949999162 {}] cleaner.HFileCleaner(306): Exit Thread[master/5be53b084ac7:0:becomeActiveMaster-HFileCleaner.small.0-1733949999162,5,FailOnTimeoutGroup] 2024-12-11T20:54:46,848 INFO [M:0;5be53b084ac7:41779 {}] hbase.ChoreService(370): Chore service for: master/5be53b084ac7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T20:54:46,848 INFO [M:0;5be53b084ac7:41779 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T20:54:46,848 DEBUG [M:0;5be53b084ac7:41779 {}] master.HMaster(1795): Stopping service threads 2024-12-11T20:54:46,848 INFO [M:0;5be53b084ac7:41779 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T20:54:46,848 INFO [M:0;5be53b084ac7:41779 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T20:54:46,848 INFO [M:0;5be53b084ac7:41779 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T20:54:46,849 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T20:54:46,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T20:54:46,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T20:54:46,855 DEBUG [M:0;5be53b084ac7:41779 {}] zookeeper.ZKUtil(347): master:41779-0x10016aeea9b0000, quorum=127.0.0.1:57311, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T20:54:46,855 WARN [M:0;5be53b084ac7:41779 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T20:54:46,856 INFO [M:0;5be53b084ac7:41779 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/.lastflushedseqids 2024-12-11T20:54:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44295 is added to blk_1073742417_1593 (size=316) 2024-12-11T20:54:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073742417_1593 (size=316) 2024-12-11T20:54:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45301 is added to blk_1073742417_1593 (size=316) 2024-12-11T20:54:46,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,911 INFO [RS:1;5be53b084ac7:39865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:54:46,911 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39865-0x10016aeea9b0002, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,911 INFO [RS:1;5be53b084ac7:39865 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,39865,1733949995112; zookeeper connection closed. 2024-12-11T20:54:46,912 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6a022e6e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6a022e6e 2024-12-11T20:54:46,929 INFO [RS:2;5be53b084ac7:40781 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T20:54:46,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,929 INFO [RS:2;5be53b084ac7:40781 {}] regionserver.HRegionServer(1031): Exiting; stopping=5be53b084ac7,40781,1733949995244; zookeeper connection closed. 2024-12-11T20:54:46,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40781-0x10016aeea9b0003, quorum=127.0.0.1:57311, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T20:54:46,929 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@762dcab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@762dcab 2024-12-11T20:54:46,930 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T20:54:47,272 INFO [M:0;5be53b084ac7:41779 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T20:54:47,272 INFO [M:0;5be53b084ac7:41779 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T20:54:47,272 DEBUG [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T20:54:47,286 INFO [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:54:47,286 DEBUG [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T20:54:47,286 DEBUG [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T20:54:47,286 DEBUG [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T20:54:47,287 INFO [M:0;5be53b084ac7:41779 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=980.48 KB heapSize=1.15 MB 2024-12-11T20:54:47,287 ERROR [AsyncFSWAL-0-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T20:54:49,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741833_1009 (size=13600) 2024-12-11T20:54:49,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46705 is added to blk_1073741835_1011 (size=17277) 2024-12-11T20:54:52,008 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T20:54:53,973 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T20:54:59,475 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T20:55:01,791 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:55:31,791 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5be53b084ac7:41779 236 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 44 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d338b06 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703f0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@764bfc20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11482 Waited count: 12294 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1761777e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3f0bd9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@beacdb4-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3142 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18e9f045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36717): State: TIMED_WAITING Blocked count: 1 Waited 
count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 186 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 54218 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453b10b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36717): State: TIMED_WAITING Blocked count: 81 Waited count: 2483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36717): State: TIMED_WAITING Blocked count: 99 Waited count: 2509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36717): State: TIMED_WAITING Blocked count: 83 Waited count: 2492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36717): State: TIMED_WAITING Blocked count: 87 Waited count: 2477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36717): State: TIMED_WAITING Blocked count: 92 Waited count: 2495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 279 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@1f5e2812-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 46313): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 306 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6cfb21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1408 Waited count: 1595 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins): State: TIMED_WAITING Blocked count: 1505 Waited count: 1506 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 0 Waited count: 2054 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 
(qtp558478696-121-acceptor-0@4ee25c3b-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 36451): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 307 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c05b3ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1413 Waited count: 1595 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1414918684-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1414918684-154-acceptor-0@760e8002-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:33915}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1414918684-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1414918684-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 317 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0a5f17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1394 Waited count: 1609 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@58ac8204[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@60ca88e8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@1bf3964e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57311): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 415 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10b99afa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57311):): State: WAITING Blocked count: 3 Waited count: 527 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55d85242 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a189ac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 247 (LeaseRenewer:jenkins@localhost:36717): State: TIMED_WAITING Blocked count: 16 Waited count: 581 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@35872e3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 33 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57311)): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 9 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29d46d5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24ded2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d163e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 104 Waited count: 491 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b3f04ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 138 Waited count: 543 Waiting on java.util.concurrent.Semaphore$NonfairSync@510b2471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779): State: WAITING Blocked count: 49 Waited count: 12724 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31015d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f0bf83d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@472b6501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44166697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@24079f94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@345d4139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;5be53b084ac7:41779): State: TIMED_WAITING Blocked count: 13 Waited count: 4911 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007fe2f4f78b90.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@4f577923): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5480 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 41 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 163 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54658 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 73 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b9b38e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 
(regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d48e50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49912a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31c79106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54374 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 963 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 564 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 984 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1047 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728d1755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1628 (Container metrics unregistration): State: WAITING Blocked count: 13 Waited count: 50 Waiting on java.util.TaskQueue@3ca6959e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1654 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 668 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1855 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1856 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2041 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 383 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5983 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5984 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8414 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10125 (AsyncFSWAL-1-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@557172c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10128 
(java.util.concurrent.ThreadPoolExecutor$Worker@5ef2296f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10130 (java.util.concurrent.ThreadPoolExecutor$Worker@13cb80dc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10133 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10134 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-11T20:56:01,791 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:56:31,791 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T20:56:35,606 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=24, reuseRatio=70.59% 2024-12-11T20:56:35,607 DEBUG [master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-11T20:56:46,868 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5be53b084ac7:41779 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 44 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d338b06 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703f0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 63 Waiting on java.util.concurrent.CountDownLatch$Sync@397872d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11482 Waited count: 12295 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1761777e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3f0bd9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@beacdb4-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3142 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18e9f045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36717): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 206 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 
Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 207 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 60172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453b10b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36717): State: TIMED_WAITING Blocked count: 81 Waited count: 2544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36717): State: TIMED_WAITING Blocked count: 99 Waited count: 2569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36717): State: TIMED_WAITING Blocked count: 83 Waited count: 2553 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36717): State: TIMED_WAITING Blocked count: 87 Waited count: 2538 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36717): State: TIMED_WAITING Blocked count: 92 Waited count: 2556 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 309 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@1f5e2812-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1234 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 46313): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 326 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6cfb21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1428 Waited count: 1635 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins): State: TIMED_WAITING Blocked count: 1565 Waited count: 1566 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 0 Waited count: 2114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@4ee25c3b-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1233 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 36451): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 327 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c05b3ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1433 Waited count: 1635 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1414918684-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1414918684-154-acceptor-0@760e8002-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:33915}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1414918684-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1414918684-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0a5f17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1414 Waited count: 1649 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ef89537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@58ac8204[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@104630b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@60ca88e8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8bd8419 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@1bf3964e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57311): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 308 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 419 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10b99afa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57311):): State: WAITING Blocked count: 3 Waited count: 531 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55d85242 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 560 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a189ac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@35872e3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 33 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57311)): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 9 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29d46d5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 
(NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24ded2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d163e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 104 Waited count: 491 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b3f04ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 138 Waited count: 543 Waiting on java.util.concurrent.Semaphore$NonfairSync@510b2471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779): State: WAITING Blocked count: 49 Waited count: 12724 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31015d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f0bf83d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@472b6501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44166697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@24079f94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@345d4139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;5be53b084ac7:41779): State: TIMED_WAITING Blocked count: 13 Waited count: 4911 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007fe2f4f78b90.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@4f577923): State: TIMED_WAITING Blocked count: 0 Waited count: 204 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6080 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 41 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 175 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19feda88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 60660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 73 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b9b38e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d48e50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49912a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31c79106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 60375 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 984 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 919 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1047 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728d1755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1628 (Container metrics unregistration): State: WAITING Blocked count: 13 Waited count: 50 Waiting on java.util.TaskQueue@3ca6959e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1654 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 668 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1855 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1856 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2041 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5983 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5984 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8414 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10125 (AsyncFSWAL-1-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@557172c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10134 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10139 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-11T20:57:01,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:57:31,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
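The recurring "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks above and below follow the pattern produced by polling the JVM's ThreadMXBean once a minute while the test waits for the master to shut down (compare the Time-limited test thread's stack further down: LocalHBaseCluster.join -> Threads.printThreadInfo -> ReflectionUtils.printThreadInfo -> sun.management.ThreadImpl.getThreadInfo). A minimal, self-contained sketch of such a dumper, assuming only the standard java.lang.management API; the class name PeriodicThreadDump is hypothetical and this is not the HBase utility itself:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Illustrative sketch only: prints a dump loosely mirroring the entries in this log
// (Thread <id> (<name>): State / Blocked count / Waited count / Stack) every 60 seconds.
public class PeriodicThreadDump {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        while (true) {
            System.out.println("Process Thread Dump: Automatic Stack Trace every 60 seconds");
            // Integer.MAX_VALUE requests full stack depth, as seen in the dumps above.
            for (ThreadInfo info : mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE)) {
                if (info == null) {
                    continue; // the thread may have exited between the id snapshot and the query
                }
                System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
            Thread.sleep(60_000L);
        }
    }
}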
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5be53b084ac7:41779 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 44 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d338b06 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703f0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6827 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 69 Waiting on java.util.concurrent.CountDownLatch$Sync@2251d490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11482 Waited count: 12296 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1761777e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3f0bd9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1358 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@beacdb4-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3142 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18e9f045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36717): State: TIMED_WAITING Blocked count: 1 Waited 
count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 227 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 66130 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453b10b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36717): State: TIMED_WAITING Blocked count: 81 Waited count: 2605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36717): State: TIMED_WAITING Blocked count: 99 Waited count: 2630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36717): State: TIMED_WAITING Blocked count: 83 Waited count: 2614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36717): State: TIMED_WAITING Blocked count: 87 Waited count: 2599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36717): State: TIMED_WAITING Blocked count: 92 Waited count: 2617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@1f5e2812-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1354 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 46313): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6cfb21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1448 Waited count: 1675 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 678 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins): State: TIMED_WAITING Blocked count: 1625 Waited count: 1626 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 0 Waited count: 2174 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 
(qtp558478696-121-acceptor-0@4ee25c3b-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1353 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 36451): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c05b3ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1453 Waited count: 1675 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1414918684-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1414918684-154-acceptor-0@760e8002-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:33915}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1414918684-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1414918684-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1352 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0a5f17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1434 Waited count: 1689 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ef89537 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@58ac8204[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@104630b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@60ca88e8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8bd8419 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 
(java.util.concurrent.ThreadPoolExecutor$Worker@1bf3964e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57311): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10b99afa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57311):): State: WAITING Blocked count: 3 Waited count: 536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55d85242 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 565 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a189ac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@35872e3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 33 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57311)): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 9 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29d46d5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24ded2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d163e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 104 Waited count: 491 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b3f04ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 138 Waited count: 543 Waiting on java.util.concurrent.Semaphore$NonfairSync@510b2471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779): State: WAITING Blocked count: 49 Waited count: 12724 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31015d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f0bf83d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@472b6501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44166697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@24079f94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@345d4139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;5be53b084ac7:41779): State: TIMED_WAITING Blocked count: 13 Waited count: 4911 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007fe2f4f78b90.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@4f577923): State: TIMED_WAITING Blocked count: 0 Waited count: 224 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6679 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 41 Waited count: 5 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 175 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19feda88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66661 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 73 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b9b38e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d48e50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49912a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31c79106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66377 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 984 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 925 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1047 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728d1755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1628 (Container metrics unregistration): State: WAITING Blocked count: 13 Waited count: 50 Waiting on java.util.TaskQueue@3ca6959e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1654 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 669 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1855 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1856 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2041 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5983 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5984 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8414 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.ForkJoinPool@3f4ade90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10125 (AsyncFSWAL-1-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@557172c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10134 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10139 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-11T20:58:01,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:58:31,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5be53b084ac7:41779 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 44 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d338b06 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4703f0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 75 Waiting on java.util.concurrent.CountDownLatch$Sync@7c90e78b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11482 Waited count: 12297 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1761777e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@4f3f0bd9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1478 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@beacdb4-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3142 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18e9f045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36717): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 246 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 247 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 72088 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453b10b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36717): State: TIMED_WAITING Blocked count: 82 Waited count: 2667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36717): State: TIMED_WAITING Blocked count: 105 Waited count: 2695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36717): State: TIMED_WAITING Blocked count: 85 Waited count: 2676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36717): State: TIMED_WAITING Blocked count: 91 Waited count: 2662 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36717): State: TIMED_WAITING Blocked count: 92 Waited count: 2680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 369 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(253478452)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@1f5e2812-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1474 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 46313): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c6cfb21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1472 Waited count: 1721 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 738 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 46313): State: TIMED_WAITING Blocked count: 0 Waited count: 737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 112 (IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins): State: TIMED_WAITING Blocked count: 1672 Waited count: 1673 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 0 Waited count: 2222 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@4ee25c3b-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1473 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 36451): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c05b3ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1473 Waited count: 1721 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 36451): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1414918684-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1414918684-154-acceptor-0@760e8002-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:33915}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1414918684-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1414918684-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38091): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 377 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a0a5f17 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717): State: TIMED_WAITING Blocked count: 1454 Waited count: 1729 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38091): State: TIMED_WAITING Blocked count: 0 Waited count: 742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 196 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 213 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 214 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ef89537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@58ac8204[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@104630b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (java.util.concurrent.ThreadPoolExecutor$Worker@60ca88e8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8bd8419 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@1bf3964e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57311): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 368 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 428 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10b99afa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57311):): State: WAITING Blocked count: 3 Waited count: 540 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@55d85242 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31a189ac Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@35872e3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 33 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57311)): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 9 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29d46d5d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 73 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24ded2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 
(NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fea4dae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d163e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 104 Waited count: 491 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b3f04ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 138 Waited count: 543 Waiting on java.util.concurrent.Semaphore$NonfairSync@510b2471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779): State: WAITING Blocked count: 49 Waited count: 12724 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31015d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c318801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f0bf83d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@472b6501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@44166697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41779): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@24079f94 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@345d4139 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;5be53b084ac7:41779): State: TIMED_WAITING Blocked count: 13 Waited count: 4911 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007fe2f4f78b90.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@4f577923): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7279 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 41 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 49 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 175 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19feda88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 72662 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 73 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b9b38e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d48e50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49912a98 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/5be53b084ac7:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31c79106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 520 (region-location-0): State: WAITING Blocked count: 11 Waited count: 18 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 72378 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 564 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 5 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 984 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 931 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1047 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1077 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@728d1755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1628 (Container metrics unregistration): State: WAITING Blocked count: 13 Waited count: 50 Waiting on java.util.TaskQueue@3ca6959e Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1855 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1856 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3dee7173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2041 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5983 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5984 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8414 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10125 (AsyncFSWAL-1-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@557172c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10139 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-11T20:59:01,793 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T20:59:24,366 DEBUG [Time-limited test {}] hbase.LocalHBaseCluster(398): Interrupted java.lang.InterruptedException: null at java.lang.Object.wait(Native Method) ~[?:?] at java.lang.Thread.join(Thread.java:1307) ~[?:?] at org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:111) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T20:59:24,390 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b930908{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T20:59:24,391 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T20:59:24,391 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T20:59:24,391 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ef7cd2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T20:59:24,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@80b4649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/hadoop.log.dir/,STOPPED} 2024-12-11T20:59:24,400 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T20:59:24,400 WARN [BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T20:59:24,400 WARN [BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-464186549-172.17.0.2-1733949986605 (Datanode Uuid 6bce5cc2-a500-49e3-aebf-6157469ea69c) service to localhost/127.0.0.1:36717
2024-12-11T20:59:24,400 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T20:59:24,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T20:59:24,401 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T20:59:24,402 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
====> TEST TIMED OUT. PRINTING THREAD DUMP. <====
Timestamp: 2024-12-11 08:59:24,377
"MutableQuantiles-0" daemon prio=5 tid=984 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"qtp1915521514-42" daemon prio=5 tid=42 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"ConnnectionExpirer" daemon prio=5 tid=237 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
"IPC Server handler 3 on default port 46313" daemon prio=5 tid=106 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
"master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=360 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"Command processor" daemon prio=5 tid=134 in Object.wait()
   java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
"Command processor" daemon prio=5 tid=167 in Object.wait()
   java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
"qtp1414918684-156" daemon prio=5 tid=156 terminated
   java.lang.Thread.State: TERMINATED
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"RPCClient-NioEventLoopGroup-6-16" daemon prio=5 tid=5984 runnable
   java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4)" daemon prio=5 tid=187 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
"IPC Server handler 3 on default port 36451" daemon prio=5 tid=140 timed_waiting
   java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2)" daemon prio=5 tid=188 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "NIOWorkerThread-10" daemon prio=5 tid=270 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MarkedDeleteBlockScrubberThread" daemon prio=5 tid=48 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-6-thread-1" prio=5 tid=36 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-4" daemon prio=5 tid=263 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-2" daemon prio=5 tid=578 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-7" daemon prio=5 tid=1202 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048" daemon prio=5 tid=72 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-5" daemon prio=5 tid=1139 runnable 
java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=128 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "nioEventLoopGroup-4-1" prio=10 tid=125 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test.named-queue-events-pool-0" daemon prio=5 tid=290 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) at app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) at 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) at app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-3" daemon prio=5 tid=262 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d" daemon prio=5 tid=71 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=163 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "pool-12-thread-1" prio=5 tid=69 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 36451" daemon prio=5 tid=139 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 0 on default port 36717" daemon prio=5 tid=64 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 3 on default port 38091" daemon prio=5 tid=173 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41779" daemon prio=5 tid=283 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "DatanodeAdminMonitor-0" daemon prio=5 tid=62 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp988370956-89" daemon prio=5 tid=89 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=58 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master:store-WAL-Roller" daemon prio=5 tid=382 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) "IPC Server handler 3 on default port 36717" daemon prio=5 tid=67 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-3" daemon prio=5 tid=527 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-15-thread-1" daemon prio=5 tid=214 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1414918684-154" daemon prio=5 tid=154 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-12" daemon prio=5 tid=272 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-15" daemon prio=5 tid=5983 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-4-1" daemon prio=10 tid=313 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=164 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-10" daemon prio=5 tid=1256 runnable 
java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Parameter Sending Thread for localhost/127.0.0.1:36717" daemon prio=5 tid=113 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "M:0;5be53b084ac7:41779" daemon prio=5 tid=288 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1094/0x00007fe2f4f78b90.run(Unknown Source) at app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) at app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) at app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) at 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) at app//org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) at app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) at app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) at app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-3" daemon prio=10 tid=1140 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-2-1" prio=10 tid=91 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Timer for 'JobHistoryServer' metrics system" daemon prio=5 tid=10139 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41779" daemon prio=5 tid=286 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RPCClient-NioEventLoopGroup-6-8" daemon prio=5 tid=1203 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Monitor thread for TaskMonitor" daemon prio=5 tid=358 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-26-thread-1" prio=5 tid=119 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-3" daemon prio=5 tid=1855 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-2" daemon prio=10 tid=399 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-1" daemon prio=5 tid=245 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=54 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "surefire-forkedjvm-stream-flusher" daemon prio=5 tid=16 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp988370956-86" daemon prio=5 tid=86 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 38091" daemon prio=5 tid=174 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Notification Thread" daemon prio=9 tid=13 runnable java.lang.Thread.State: RUNNABLE "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c" daemon prio=5 tid=74 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 36451" daemon prio=5 tid=129 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "regionserver/5be53b084ac7:0.procedureResultReporter" daemon prio=5 tid=478 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "qtp1915521514-38" daemon prio=5 tid=38 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1414918684-153" daemon prio=5 tid=153 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 46313" daemon prio=5 tid=107 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "qtp1915521514-43" daemon prio=5 tid=43 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Client (166867004) connection to localhost/127.0.0.1:36717 from jenkins" daemon prio=5 tid=112 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "NIOServerCxnFactory.SelectorThread-0" daemon prio=5 tid=238 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "NIOServerCxnFactory.SelectorThread-1" daemon prio=5 tid=239 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "IPC Server Responder" daemon prio=5 tid=57 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717" daemon prio=5 tid=168 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Object.wait(Native Method) at 
app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1915521514-44" daemon prio=5 tid=44 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Async-Client-Retry-Timer-pool-0" daemon prio=5 tid=412 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-3" daemon prio=10 tid=436 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=127 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "Block report processor" daemon prio=5 tid=51 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) "NIOWorkerThread-7" daemon prio=5 tid=267 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-2" daemon prio=10 tid=435 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Signal Dispatcher" daemon prio=9 tid=4 runnable java.lang.Thread.State: RUNNABLE "IPC Server idle connection scanner for port 36717" daemon prio=5 tid=56 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "nioEventLoopGroup-6-1" prio=10 tid=158 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-1" daemon prio=5 tid=577 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1915521514-37" daemon prio=5 tid=37 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-16" daemon prio=5 tid=276 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RegionServerTracker-0" daemon prio=5 tid=456 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-4-3" daemon prio=10 tid=1091 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41779" daemon prio=5 tid=282 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data5/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=228 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcClient-timer-pool-0" daemon prio=5 tid=413 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp558478696-121-acceptor-0@4ee25c3b-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:40177}" daemon prio=3 tid=121 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 36451" daemon prio=5 tid=138 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41779" daemon prio=5 tid=281 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "qtp988370956-87-acceptor-0@1f5e2812-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:33193}" daemon prio=3 tid=87 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=93 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "Finalizer" daemon prio=8 tid=3 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) "NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57311" daemon prio=5 tid=240 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data2/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=200 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-2" daemon prio=5 tid=15 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4" daemon prio=5 tid=34 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=130 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41779" daemon prio=5 tid=278 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "Socket Reader #1 for port 0" daemon prio=5 tid=55 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "ProcessThread(sid:0 cport:57311):" daemon prio=5 tid=243 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) "zk-event-processor-pool-0" daemon prio=5 tid=264 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-23-thread-1" daemon prio=5 tid=222 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-2" daemon prio=5 tid=261 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a" daemon prio=5 tid=73 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp988370956-88" daemon prio=5 tid=88 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-11" daemon prio=5 tid=271 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-1" daemon prio=10 tid=291 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-5" daemon prio=5 tid=265 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/5be53b084ac7:0.procedureResultReporter" daemon prio=5 tid=479 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "nioEventLoopGroup-6-2" prio=10 tid=10142 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket 
Reader #1 for port 0" daemon prio=5 tid=94 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "pool-29-thread-1" prio=5 tid=136 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-1" daemon prio=5 tid=14 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Common-Cleaner" daemon prio=8 tid=12 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) at java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) "qtp558478696-123" daemon prio=5 tid=123 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1915521514-40" daemon prio=5 tid=40 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 36717" daemon prio=5 tid=66 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "SessionTracker" daemon prio=5 tid=241 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) "IPC Server handler 1 on default port 36717" daemon prio=5 tid=65 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Session-HouseKeeper-11478400-1" prio=5 tid=45 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=97 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 36451" daemon prio=5 tid=141 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-14" daemon prio=5 tid=2041 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master/5be53b084ac7:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=362 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=96 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41779" daemon prio=5 tid=280 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc" daemon prio=5 tid=49 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-6fb23b47-1" prio=5 tid=90 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
"java.util.concurrent.ThreadPoolExecutor$Worker@60ca88e8[State = -1, empty queue]" daemon prio=5 tid=227 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=100 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "CacheReplicationMonitor(253478452)" daemon prio=5 tid=75 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) "IPC Server idle connection scanner for port 38091" daemon prio=5 tid=162 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "IPC Server idle connection scanner for port 46313" daemon prio=5 tid=95 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "NIOWorkerThread-8" daemon prio=5 tid=268 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-1-hdfs://localhost:36717/user/jenkins/test-data/ae0e6e42-965d-d4eb-3fb9-24323b2c1fe5/MasterData-prefix:5be53b084ac7,41779,1733949993169" daemon prio=5 tid=10125 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner" daemon prio=5 tid=23 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 38091" daemon prio=5 tid=172 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Idle-Rpc-Conn-Sweeper-pool-0" daemon prio=5 tid=414 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1915521514-41-acceptor-0@beacdb4-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:39731}" daemon prio=3 tid=41 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SyncThread:0" daemon prio=5 tid=242 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) "main" prio=5 tid=1 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.dumpThreads(Native Method) at java.base@17.0.11/java.lang.Thread.getAllStackTraces(Thread.java:1671) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDump(TimedOutTestsListener.java:92) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDiagnosticString(TimedOutTestsListener.java:78) at app//org.apache.hadoop.hbase.TimedOutTestsListener.testFailure(TimedOutTestsListener.java:65) at 
app//org.junit.runner.notification.SynchronizedRunListener.testFailure(SynchronizedRunListener.java:94) at app//org.junit.runner.notification.RunNotifier$6.notifyListener(RunNotifier.java:177) at app//org.junit.runner.notification.RunNotifier$SafeNotifier.run(RunNotifier.java:72) at app//org.junit.runner.notification.RunNotifier.fireTestFailures(RunNotifier.java:173) at app//org.junit.runner.notification.RunNotifier.fireTestFailure(RunNotifier.java:167) at app//org.apache.maven.surefire.common.junit4.Notifier.fireTestFailure(Notifier.java:100) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:23) at app//org.junit.internal.runners.model.EachTestNotifier.addMultipleFailureException(EachTestNotifier.java:29) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:21) at app//org.junit.runners.ParentRunner.run(ParentRunner.java:419) at app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) at app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) at app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) at app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) at app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) at app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) "Socket Reader #1 for port 0" daemon prio=5 tid=161 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "weak-ref-cleaner-strictcontextstorage" daemon prio=1 tid=256 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717" daemon prio=5 tid=135 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 38091" daemon prio=5 tid=171 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-14" daemon prio=5 tid=274 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-7-thread-1" prio=5 tid=46 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data4/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=196 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41779" daemon prio=5 tid=285 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "SSL Certificates Store Monitor" daemon prio=5 tid=25 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3" daemon prio=5 tid=159 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RequestThrottler" daemon prio=5 tid=244 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) "zk-permission-watcher-pool-0" daemon prio=5 tid=1092 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3)" daemon prio=5 tid=185 timed_waiting 
java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "NIOWorkerThread-15" daemon prio=5 tid=275 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.PeerCache@4f577923" daemon prio=5 tid=364 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) at app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) at app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 36451" daemon prio=5 tid=137 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data6/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=224 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-6" daemon prio=5 tid=1201 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-18-thread-1" prio=5 tid=85 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test-EventThread" daemon prio=5 tid=260 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) "HMaster-EventLoopGroup-1-1" daemon prio=10 tid=258 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
"RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41779" daemon prio=5 tid=284 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data3/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=199 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test" daemon prio=5 tid=22 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.server.session.SessionHandler.doStop(SessionHandler.java:520) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.server.handler.ContextHandler.stopContext(ContextHandler.java:1066) at app//org.eclipse.jetty.servlet.ServletContextHandler.stopContext(ServletContextHandler.java:386) at app//org.eclipse.jetty.server.handler.ContextHandler.doStop(ContextHandler.java:1120) at app//org.eclipse.jetty.servlet.ServletContextHandler.doStop(ServletContextHandler.java:297) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at 
app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.server.handler.StatisticsHandler.doStop(StatisticsHandler.java:272) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.stop(ContainerLifeCycle.java:180) at app//org.eclipse.jetty.util.component.ContainerLifeCycle.doStop(ContainerLifeCycle.java:201) at app//org.eclipse.jetty.server.handler.AbstractHandler.doStop(AbstractHandler.java:108) at app//org.eclipse.jetty.server.Server.doStop(Server.java:470) at app//org.eclipse.jetty.util.component.AbstractLifeCycle.stop(AbstractLifeCycle.java:94) at app//org.apache.hadoop.http.HttpServer2.stop(HttpServer2.java:1595) at app//org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer.close(DatanodeHttpServer.java:346) at app//org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:2573) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNode(MiniDFSCluster.java:2232) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:2222) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2201) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) at app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) at app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-9" daemon prio=5 tid=1255 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-3" daemon prio=10 tid=400 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-2" daemon prio=10 tid=1077 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RedundancyMonitor" daemon prio=5 tid=47 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) at java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp558478696-122" daemon prio=5 tid=122 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 46313" daemon prio=5 tid=104 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "SnapshotHandlerChoreCleaner" daemon prio=5 tid=425 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100" daemon prio=5 tid=35 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90" daemon prio=5 tid=118 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-6" daemon prio=5 tid=266 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 46313" daemon prio=5 tid=103 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "HBase-Metrics2-1" daemon prio=5 tid=257 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1)" daemon prio=5 tid=186 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1" daemon prio=5 tid=92 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-464186549-172.17.0.2-1733949986605 heartbeating to localhost/127.0.0.1:36717" daemon prio=5 tid=101 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-33-thread-1" daemon prio=5 tid=232 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-11" daemon prio=5 tid=1257 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9" daemon prio=5 tid=126 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@58ac8204[State = -1, empty queue]" daemon prio=5 tid=220 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1414918684-155" daemon prio=5 tid=155 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-13" daemon prio=5 tid=1260 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Container metrics unregistration" daemon prio=5 tid=1628 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=131 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-4" daemon prio=5 tid=1047 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-4" daemon prio=5 tid=1856 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 38091" daemon prio=5 tid=170 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) 
"RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41779" daemon prio=5 tid=287 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Server handler 2 on default port 46313" daemon prio=5 tid=105 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "FsDatasetAsyncDiskServiceFixer" daemon prio=5 tid=236 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2" daemon prio=5 tid=84 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-1" daemon prio=10 tid=335 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1915521514-39" daemon prio=5 tid=39 runnable 
java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/777c0b2b-a2cc-daac-c9ac-56d057cd0e06/cluster_af6bb29d-6b46-051a-495b-5f3ca47b82ab/data/data1/current/BP-464186549-172.17.0.2-1733949986605" daemon prio=5 tid=197 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19" daemon prio=5 tid=61 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test-SendThread(127.0.0.1:57311)" daemon prio=5 tid=259 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) at app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) "pool-20-thread-1" prio=5 tid=102 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at 
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Session-HouseKeeper-2536a978-1" prio=5 tid=124 timed_waiting
  java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-12" daemon prio=5 tid=1259 runnable
  java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp558478696-120" daemon prio=5 tid=120 runnable
  java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fe2f442d2a8.run(Unknown Source)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"region-location-0" daemon prio=5 tid=520 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server listener on 0" daemon prio=5 tid=160 terminated
  java.lang.Thread.State: TERMINATED
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)

"RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41779" daemon prio=5 tid=279 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)

"MiniHBaseClusterRegionServer-EventLoopGroup-4-2" daemon prio=10 tid=564 runnable
  java.lang.Thread.State: RUNNABLE
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"FSEditLogAsync" daemon prio=5 tid=53 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 4 on default port 36717" daemon prio=5 tid=68 timed_waiting
  java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"RPCClient-NioEventLoopGroup-6-2" daemon prio=5 tid=525 runnable
  java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-9" daemon prio=5 tid=269 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Reference Handler" daemon prio=10 tid=2 runnable
  java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    at java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    at java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)

"NIOWorkerThread-13" daemon prio=5 tid=273 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"surefire-forkedjvm-command-thread" daemon prio=5 tid=18 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    at java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    at java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    at app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    at app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    at app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    at app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    at app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    at app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-1" daemon prio=5 tid=524 runnable
  java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"regionserver/5be53b084ac7:0.procedureResultReporter" daemon prio=5 tid=480 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)